chainer-master/chainer/distributions/multivariate_normal.py
import math
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.array import broadcast
from chainer.functions.array import diagonal
from chainer.functions.array import expand_dims
from chainer.functions.array import squeeze
from chainer.functions.array import stack
from chainer.functions.array import swapaxes
from chainer.functions.array import transpose
from chainer.functions.array import where
from chainer.functions.math import exponential
from chainer.functions.math import matmul
from chainer.functions.math import sum as sum_mod
from chainer.utils import argument
from chainer.utils import cache
from chainer.utils import type_check
try:
import scipy.linalg
available_cpu = True
except ImportError as e:
available_cpu = False
_import_error = e
ENTROPYC = 0.5 * math.log(2 * math.pi * math.e)
LOGPROBC = - 0.5 * math.log(2 * math.pi)
class TriangularInv(chainer.function_node.FunctionNode):
def __init__(self, lower):
self._lower = lower
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
a_type, = in_types
type_check.expect(a_type.dtype == numpy.float32)
# Only 2D array shapes allowed
type_check.expect(a_type.ndim == 2)
# Matrix inversion only allowed for square matrices
type_check.expect(a_type.shape[0] == a_type.shape[1])
def forward_cpu(self, inputs):
self.retain_outputs((0,))
if not available_cpu:
            raise ImportError('SciPy is not available. Forward computation'
                              ' of triangular_inv on CPU cannot be done. ' +
                              str(_import_error))
x, = inputs
if len(x) == 0:
# linalg.solve_triangular crashes
return x,
invx = scipy.linalg.solve_triangular(
x, numpy.eye(len(x), dtype=x.dtype), lower=self._lower)
return invx,
def forward_gpu(self, inputs):
self.retain_outputs((0,))
x, = inputs
if len(x) == 0:
# linalg.solve_triangular crashes
return x,
invx = cuda.cupyx.scipy.linalg.solve_triangular(
x,
cuda.cupy.eye(len(x), dtype=x.dtype),
lower=self._lower)
return invx,
def backward(self, target_input_indexes, grad_outputs):
gy, = grad_outputs
xp = backend.get_array_module(gy)
invx, = self.get_retained_outputs()
mask = xp.tril(xp.ones((len(invx), len(invx)), dtype=bool))
if not self._lower:
mask = mask.T
# Gradient is - x^-T (dx) x^-T
invxT = chainer.functions.transpose(invx)
gx = chainer.functions.matmul(
chainer.functions.matmul(- invxT, gy), invxT)
gx = where.where(mask, gx, xp.zeros_like(gx.array))
return gx,
def _triangular_inv(x, lower=True):
y, = TriangularInv(lower).apply((x,))
return y
def _batch_triangular_inv(x, lower=True):
n = len(x)
y = []
for i in range(n):
y.append(_triangular_inv(x[i]))
return stack.stack(y)
def _triangular_logdet(x):
diag = diagonal.diagonal(x, axis1=-2, axis2=-1)
return sum_mod.sum(exponential.log(abs(diag)), axis=-1)
class MultivariateNormal(distribution.Distribution):
"""MultivariateNormal Distribution.
The probability density function of the distribution is expressed as
.. math::
        p(x;\\mu,V) = \\frac{1}{\\sqrt{\\det(2\\pi V)}}
            \\exp\\left(-\\frac{1}{2}(x-\\mu)^\\top V^{-1}(x-\\mu)\\right)
Args:
loc (:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the location :math:`\\mu`.
scale_tril (:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the scale :math:`L` such that
:math:`V=LL^T`.
"""
def __init__(self, loc, **kwargs):
scale_tril = None
if kwargs:
scale_tril, = argument.parse_kwargs(
kwargs, ('scale_tril', scale_tril))
if scale_tril is None:
raise ValueError('`scale_tril` must have a value.')
self.__loc = loc
self.__scale_tril = scale_tril
@cache.cached_property
def loc(self):
return chainer.as_variable(self.__loc)
@cache.cached_property
def scale_tril(self):
return chainer.as_variable(self.__scale_tril)
@cache.cached_property
def _logdet_scale(self):
return _triangular_logdet(self.scale_tril)
@property
def d(self):
return self.scale_tril.shape[-1]
def __copy__(self):
        # ``scale_tril`` must be passed as a keyword argument, since
        # ``__init__`` only accepts it via ``**kwargs``.
        return self._copy_to(
            MultivariateNormal(self.loc, scale_tril=self.scale_tril))
@property
def batch_shape(self):
return self.loc.shape[:-1]
@cache.cached_property
def entropy(self):
return self._logdet_scale + ENTROPYC * self.d
@property
def event_shape(self):
return self.loc.shape[-1:]
@property
def _is_gpu(self):
return isinstance(self.loc.data, cuda.ndarray)
def log_prob(self, x):
scale_tril_inv = _batch_triangular_inv(
self.scale_tril.reshape(-1, self.d, self.d))
scale_tril_inv = scale_tril_inv.reshape(
self.batch_shape+(self.d, self.d))
bsti = broadcast.broadcast_to(scale_tril_inv, x.shape + (self.d,))
bl = broadcast.broadcast_to(self.loc, x.shape)
m = matmul.matmul(
bsti,
expand_dims.expand_dims(x - bl, axis=-1))
m = matmul.matmul(swapaxes.swapaxes(m, -1, -2), m)
m = squeeze.squeeze(m, axis=-1)
m = squeeze.squeeze(m, axis=-1)
logz = LOGPROBC * self.d - self._logdet_scale
return broadcast.broadcast_to(logz, m.shape) - 0.5 * m
@cache.cached_property
def mean(self):
return self.loc
def sample_n(self, n):
if self._is_gpu:
eps = cuda.cupy.random.standard_normal(
(n,)+self.loc.shape+(1,), dtype=self.loc.dtype)
else:
eps = numpy.random.standard_normal(
(n,)+self.loc.shape+(1,)).astype(numpy.float32)
return (
self.loc
+ squeeze.squeeze(matmul.matmul(self.scale_tril, eps), axis=-1))
@property
def support(self):
return 'real'
@property
def params(self):
return {'loc': self.loc, 'scale_tril': self.scale_tril}
@property
def covariance(self):
return matmul.matmul(
self.scale_tril,
transpose.transpose(
self.scale_tril,
tuple(range(len(self.batch_shape))) + (-1, -2)))
@distribution.register_kl(MultivariateNormal, MultivariateNormal)
def _kl_multivariatenormal_multivariatenormal(dist1, dist2):
scale_tril_inv2 = _batch_triangular_inv(
dist2.scale_tril.reshape(-1, dist2.d, dist2.d))
trace = (
sum_mod.sum(
matmul.matmul(
scale_tril_inv2,
dist1.scale_tril.reshape(-1, dist2.d, dist2.d)) ** 2,
axis=(-1, -2))
.reshape(dist1.batch_shape))
mu = dist1.loc - dist2.loc
mah = matmul.matmul(scale_tril_inv2, mu.reshape(-1, dist1.d, 1))
mah = sum_mod.sum(mah ** 2, axis=-2).reshape(dist1.batch_shape)
return (
dist2._logdet_scale
- dist1._logdet_scale
+ 0.5 * trace
+ 0.5 * mah
- 0.5 * dist1.d)
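# --- Editor's usage sketch (not part of the original chainer file). It shows
# how MultivariateNormal is built from a location vector and a lower-
# triangular scale factor L with V = L L^T. The shapes and values below are
# assumptions for illustration; forward computation of log_prob on CPU
# requires SciPy (see TriangularInv above).
if __name__ == '__main__':
    loc = numpy.zeros(3, dtype=numpy.float32)
    scale_tril = numpy.tril(numpy.ones((3, 3), dtype=numpy.float32))
    dist = MultivariateNormal(loc, scale_tril=scale_tril)
    x = numpy.zeros(3, dtype=numpy.float32)
    print(dist.log_prob(x))         # log density at x (a 0-dim Variable)
    print(dist.sample_n(10).shape)  # (10, 3): 10 reparameterized samples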
chainer-master/chainer/distributions/poisson.py
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.math import exponential
from chainer.functions.math import lgamma
from chainer import utils
from chainer.utils import cache
class Poisson(distribution.Distribution):
"""Poisson Distribution.
The probability mass function of the distribution is expressed as
.. math::
P(x; \\lambda) = \\frac{\\lambda ^x e^{-\\lambda}}{x!}
Args:
lam(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution. :math:`\\lambda`
"""
def __init__(self, lam):
super(Poisson, self).__init__()
self.__lam = lam
@cache.cached_property
def lam(self):
return chainer.as_variable(self.__lam)
@cache.cached_property
def _log_lam(self):
return exponential.log(self.lam)
@property
def batch_shape(self):
return self.lam.shape
@property
def event_shape(self):
return ()
@property
def _is_gpu(self):
return isinstance(self.lam.data, cuda.ndarray)
def log_prob(self, x):
if isinstance(x, chainer.Variable):
x = x.data
x = x.astype(self.lam.dtype)
xp1 = (x + 1).astype(self.lam.dtype)
x, xp1 = utils.force_array(x), utils.force_array(xp1)
return x * self._log_lam - lgamma.lgamma(xp1) - self.lam
@cache.cached_property
def mean(self):
return self.lam
@property
def params(self):
return {'lam': self.lam}
def sample_n(self, n):
xp = chainer.backend.get_array_module(self.lam)
if xp is cuda.cupy:
eps = xp.random.poisson(
self.lam.data, size=(n,)+self.batch_shape, dtype=xp.float32)
else:
eps = (
xp.random.poisson(self.lam.data, size=(n,)+self.batch_shape)
.astype(xp.float32))
noise = chainer.Variable(eps)
return noise
@property
def support(self):
return 'non negative integer'
@cache.cached_property
def variance(self):
return self.lam
@distribution.register_kl(Poisson, Poisson)
def _kl_poisson_poisson(dist1, dist2):
return (
dist1.lam * (dist1._log_lam - dist2._log_lam)
- dist1.lam
+ dist2.lam)
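# --- Editor's usage sketch (not part of the original chainer file). The rate
# values below are arbitrary assumptions for illustration.
if __name__ == '__main__':
    import numpy
    dist = Poisson(numpy.array([3., 5.], dtype=numpy.float32))
    print(dist.mean)                           # equals lam
    print(dist.log_prob(numpy.array([2, 4])))  # log P(x=2; 3), log P(x=4; 5)
    print(dist.sample_n(4).shape)              # (4, 2)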
chainer-master/chainer/distributions/laplace.py
import math
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.math import exponential
from chainer import utils
from chainer.utils import cache
class LaplaceCDF(chainer.function_node.FunctionNode):
def forward(self, inputs):
x, = inputs
xp = backend.get_array_module(x)
y = 0.5 - 0.5 * xp.sign(x) * xp.expm1(-abs(x))
self.retain_outputs((0,))
return utils.force_array(y, x.dtype),
def backward(self, target_input_indexes, grad_outputs):
gy, = grad_outputs
y, = self.get_retained_outputs()
return (0.5 - abs(y - 0.5)) * gy,
class LaplaceICDF(chainer.function_node.FunctionNode):
def forward(self, inputs):
self.retain_inputs((0,))
x, = inputs
xp = backend.get_array_module(x)
h = 1 - 2 * x
return utils.force_array(xp.sign(h) * xp.log1p(-abs(h)), x.dtype),
def backward(self, target_input_indexes, grad_outputs):
gy, = grad_outputs
x, = self.get_retained_inputs()
return gy / (0.5 - abs(x - 0.5)),
def _laplace_cdf(x):
y, = LaplaceCDF().apply((x,))
return y
def _laplace_icdf(x):
y, = LaplaceICDF().apply((x,))
return y
class Laplace(distribution.Distribution):
"""Laplace Distribution.
The probability density function of the distribution is expressed as
.. math::
p(x;\\mu,b) = \\frac{1}{2b}
\\exp\\left(-\\frac{|x-\\mu|}{b}\\right)
Args:
loc(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing the location :math:`\\mu`.
scale(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter
of distribution representing the scale :math:`b`.
"""
def __init__(self, loc, scale):
super(Laplace, self).__init__()
self.__loc = loc
self.__scale = scale
@cache.cached_property
def loc(self):
return chainer.as_variable(self.__loc)
@cache.cached_property
def scale(self):
return chainer.as_variable(self.__scale)
@property
def batch_shape(self):
return self.loc.shape
def cdf(self, x):
return _laplace_cdf((x - self.loc) / self.scale)
@cache.cached_property
def entropy(self):
return 1. + exponential.log(2 * self.scale)
@property
def event_shape(self):
return ()
def icdf(self, x):
return self.loc + self.scale * _laplace_icdf(x)
@property
def _is_gpu(self):
return isinstance(self.loc.data, cuda.ndarray)
def log_prob(self, x):
scale = self.scale
return - exponential.log(2 * scale) - abs(x - self.loc) / scale
@cache.cached_property
def mean(self):
return self.loc
@cache.cached_property
def mode(self):
return self.loc
@property
def params(self):
return {'loc': self.loc, 'scale': self.scale}
def prob(self, x):
scale = self.scale
return 0.5 / scale * exponential.exp(- abs(x - self.loc) / scale)
def sample_n(self, n):
if self._is_gpu:
eps = (
cuda.cupy.random.laplace(size=(n,) + self.loc.shape)
.astype(numpy.float32))
else:
eps = (
numpy.random.laplace(size=(n,) + self.loc.shape)
.astype(numpy.float32))
return self.scale * eps + self.loc
@cache.cached_property
def stddev(self):
return math.sqrt(2) * self.scale
@property
def support(self):
return 'real'
@cache.cached_property
def variance(self):
return 2 * self.scale ** 2
@distribution.register_kl(Laplace, Laplace)
def _kl_laplace_laplace(dist1, dist2):
diff = abs(dist1.loc - dist2.loc)
return (
exponential.log(dist2.scale)
- exponential.log(dist1.scale)
+ diff / dist2.scale
+ dist1.scale / dist2.scale * exponential.exp(-diff / dist1.scale)
- 1)
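# --- Editor's usage sketch (not part of the original chainer file),
# demonstrating the cdf/icdf round trip with assumed parameter values.
if __name__ == '__main__':
    dist = Laplace(numpy.array(0., dtype=numpy.float32),
                   numpy.array(1., dtype=numpy.float32))
    u = numpy.array([0.1, 0.5, 0.9], dtype=numpy.float32)
    x = dist.icdf(u)    # quantiles of the standard Laplace
    print(dist.cdf(x))  # recovers u up to floating-point error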
chainer-master/chainer/distributions/categorical.py
import numpy
import chainer
from chainer import backend
from chainer import distribution
from chainer.functions.activation import log_softmax
from chainer.functions.math import exponential
from chainer.functions.math import sum as sum_mod
from chainer.utils import argument
from chainer.utils import cache
class Categorical(distribution.Distribution):
"""Categorical Distribution.
The probability mass function of the distribution is expressed as
.. math::
P(x = i; p) = p_i
Args:
p(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution.
logit(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing :math:`\\log\\{p\\} + C`. Either `p` or
`logit` (not both) must have a value.
"""
def __init__(self, p=None, **kwargs):
logit = None
if kwargs:
logit, = argument.parse_kwargs(
kwargs, ('logit', logit))
if not (p is None) ^ (logit is None):
raise ValueError(
'Either `p` or `logit` (not both) must have a value.')
self.__p = p
self.__logit = logit
@cache.cached_property
def p(self):
if self.__p is not None:
return chainer.as_variable(self.__p)
else:
return exponential.exp(self.log_p)
@cache.cached_property
def log_p(self):
if self.__p is not None:
return exponential.log(self.__p)
else:
return log_softmax.log_softmax(self.__logit, axis=-1)
@property
def batch_shape(self):
return self.p.shape[:-1]
@property
def event_shape(self):
return ()
@cache.cached_property
def entropy(self):
return - sum_mod.sum(
chainer.distributions.utils._modified_xlogx(self.p), axis=-1)
def log_prob(self, x):
mg = numpy.meshgrid(
*(range(i) for i in self.batch_shape),
indexing='ij')
if isinstance(x, chainer.Variable):
return self.log_p[mg + [x.data.astype(numpy.int32)]]
else:
return self.log_p[mg + [x.astype(numpy.int32)]]
@property
def params(self):
return {'p': self.p}
def sample_n(self, n):
xp = backend.get_array_module(self.p)
onebyone_p = self.p.data.reshape(-1, self.p.shape[-1])
eps = [xp.random.choice(one_p.shape[0], size=(n,), p=one_p)
for one_p in onebyone_p]
eps = xp.vstack(eps).T.reshape((n,)+self.batch_shape)
noise = chainer.Variable(eps)
return noise
@distribution.register_kl(Categorical, Categorical)
def _kl_categorical_categorical(dist1, dist2):
return sum_mod.sum(dist1.p * (dist1.log_p - dist2.log_p), axis=-1)
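# --- Editor's usage sketch (not part of the original chainer file). The
# probability table below is an assumption for illustration.
if __name__ == '__main__':
    p = numpy.array([[0.2, 0.3, 0.5],
                     [0.9, 0.05, 0.05]], dtype=numpy.float32)
    dist = Categorical(p)
    x = numpy.array([2, 0], dtype=numpy.int32)
    print(dist.log_prob(x))        # log p[i, x[i]] for each batch entry i
    print(dist.sample_n(5).shape)  # (5, 2)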
chainer-master/chainer/distributions/independent.py
import numpy
from chainer.backend import cuda
from chainer import distribution
from chainer.functions.array import repeat
from chainer.functions.array import reshape
from chainer.functions.array import transpose
from chainer.functions.math import prod
from chainer.functions.math import sum as sum_mod
from chainer.utils import array
from chainer.utils import cache
class Independent(distribution.Distribution):
"""Independent distribution.
Args:
distribution (:class:`~chainer.Distribution`): The base distribution
instance to transform.
reinterpreted_batch_ndims (:class:`int`): Integer number of rightmost
            batch dims which will be regarded as event dims. When ``None``,
            all but the first batch axis (batch axis 0) will be transferred
            to event dimensions.
"""
def __init__(self, distribution, reinterpreted_batch_ndims=None):
super(Independent, self).__init__()
self.__distribution = distribution
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = (
self._get_default_reinterpreted_batch_ndims(distribution))
elif reinterpreted_batch_ndims > len(distribution.batch_shape):
raise ValueError(
'reinterpreted_batch_ndims must be less than or equal to the '
'number of dimensions of `distribution.batch_shape`.')
self.__reinterpreted_batch_ndims = reinterpreted_batch_ndims
batch_ndim = (
len(self.distribution.batch_shape)
- self.reinterpreted_batch_ndims)
self.__batch_shape = distribution.batch_shape[:batch_ndim]
self.__event_shape = (
distribution.batch_shape[batch_ndim:]
+ distribution.event_shape)
@property
def distribution(self):
return self.__distribution
@property
def reinterpreted_batch_ndims(self):
return self.__reinterpreted_batch_ndims
@property
def batch_shape(self):
return self.__batch_shape
@property
def event_shape(self):
return self.__event_shape
@cache.cached_property
def covariance(self):
""" The covariance of the independent distribution.
By definition, the covariance of the new
distribution becomes block diagonal matrix. Let
:math:`\\Sigma_{\\mathbf{x}}` be the covariance matrix of the original
random variable :math:`\\mathbf{x} \\in \\mathbb{R}^d`, and
:math:`\\mathbf{x}^{(1)}, \\mathbf{x}^{(2)}, \\cdots \\mathbf{x}^{(m)}`
        be the :math:`m` i.i.d. random variables, the new covariance matrix
:math:`\\Sigma_{\\mathbf{y}}` of :math:`\\mathbf{y} =
[\\mathbf{x}^{(1)}, \\mathbf{x}^{(2)}, \\cdots, \\mathbf{x}^{(m)}] \\in
\\mathbb{R}^{md}` can be written as
.. math::
\\left[\\begin{array}{ccc}
\\Sigma_{\\mathbf{x}^{1}} & & 0 \\\\
& \\ddots & \\\\
0 & & \\Sigma_{\\mathbf{x}^{m}}
\\end{array} \\right].
Note that this relationship holds only if the covariance matrix of the
original distribution is given analytically.
Returns:
~chainer.Variable: The covariance of the distribution.
"""
n_repeats = array.size_of_shape(
self.distribution.batch_shape[-self.reinterpreted_batch_ndims:])
dim = array.size_of_shape(self.distribution.event_shape)
cov = repeat.repeat(
reshape.reshape(
self.distribution.covariance,
((self.batch_shape) + (1, n_repeats, dim, dim))),
n_repeats, axis=-4)
cov = reshape.reshape(
transpose.transpose(
cov, axes=(
tuple(range(len(self.batch_shape))) + (-4, -2, -3, -1))),
self.batch_shape + (n_repeats * dim, n_repeats * dim))
block_indicator = self.xp.reshape(
self._block_indicator,
tuple([1] * len(self.batch_shape)) + self._block_indicator.shape)
return cov * block_indicator
@property
def entropy(self):
return self._reduce(sum_mod.sum, self.distribution.entropy)
def cdf(self, x):
return self._reduce(prod.prod, self.distribution.cdf(x))
def icdf(self, x):
"""The inverse cumulative distribution function for multivariate variable.
Cumulative distribution function for multivariate variable is not
invertible. This function always raises :class:`RuntimeError`.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in
the codomain of the distribution
Raises:
:class:`RuntimeError`
"""
raise RuntimeError(
'Cumulative distribution function for multivariate variable '
'is not invertible.')
def log_cdf(self, x):
return self._reduce(sum_mod.sum, self.distribution.log_cdf(x))
def log_prob(self, x):
return self._reduce(sum_mod.sum, self.distribution.log_prob(x))
def log_survival_function(self, x):
return self._reduce(
sum_mod.sum, self.distribution.log_survival_function(x))
@property
def mean(self):
return self.distribution.mean
@property
def mode(self):
return self.distribution.mode
@property
def params(self):
return self.distribution.params
def perplexity(self, x):
return self._reduce(prod.prod, self.distribution.perplexity(x))
def prob(self, x):
return self._reduce(prod.prod, self.distribution.prob(x))
def sample_n(self, n):
return self.distribution.sample_n(n)
@property
def stddev(self):
return self.distribution.stddev
@property
def support(self):
return self.distribution.support
def survival_function(self, x):
return self._reduce(prod.prod, self.distribution.survival_function(x))
@property
def variance(self):
return self.distribution.variance
@property
def xp(self):
return self.distribution.xp
def _reduce(self, op, stat):
range_ = tuple(range(-self.reinterpreted_batch_ndims, 0))
return op(stat, axis=range_)
def _get_default_reinterpreted_batch_ndims(self, distribution):
ndims = len(distribution.batch_shape)
return max(0, ndims - 1)
@cache.cached_property
def _block_indicator(self):
n_repeats = array.size_of_shape(
self.distribution.batch_shape[-self.reinterpreted_batch_ndims:])
dim = array.size_of_shape(self.distribution.event_shape)
block_indicator = numpy.fromfunction(
lambda i, j: i // dim == j // dim,
(n_repeats * dim, n_repeats * dim)).astype(int)
if self.xp is cuda.cupy:
block_indicator = cuda.to_gpu(block_indicator)
return block_indicator
@distribution.register_kl(Independent, Independent)
def _kl_independent_independent(dist1, dist2):
"""Computes Kullback-Leibler divergence for independent distributions.
We can leverage the fact that
.. math::
\\mathrm{KL}(
\\mathrm{Independent}(\\mathrm{dist1}) ||
\\mathrm{Independent}(\\mathrm{dist2}))
= \\mathrm{sum}(\\mathrm{KL}(\\mathrm{dist1} || \\mathrm{dist2}))
where the sum is over the ``reinterpreted_batch_ndims``.
Args:
dist1 (:class:`~chainer.distribution.Independent`): Instance of
`Independent`.
dist2 (:class:`~chainer.distribution.Independent`): Instance of
`Independent`.
Returns:
Batchwise ``KL(dist1 || dist2)``.
Raises:
:class:`ValueError`: If the event space for ``dist1`` and ``dist2``,
or their underlying distributions don't match.
"""
p = dist1.distribution
q = dist2.distribution
# The KL between any two (non)-batched distributions is a scalar.
# Given that the KL between two factored distributions is the sum, i.e.
    # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute
# KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
if dist1.event_shape == dist2.event_shape:
if p.event_shape == q.event_shape:
n_axes = len(dist1.event_shape) - len(p.event_shape)
n_reduce_dims = tuple([-i - 1 for i in range(0, n_axes)])
return sum_mod.sum(
distribution.kl_divergence(p, q), axis=n_reduce_dims)
else:
raise NotImplementedError(
'KL between Independents with different '
'event shapes not supported.')
else:
raise ValueError('Event shapes do not match.')
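# --- Editor's usage sketch (not part of the original chainer file). It wraps
# a (3, 4)-batched Normal so that the last batch axis is reinterpreted as an
# event axis; the shapes and values are assumptions for illustration.
if __name__ == '__main__':
    from chainer.distributions import Normal
    base = Normal(loc=numpy.zeros((3, 4), dtype=numpy.float32),
                  scale=numpy.ones((3, 4), dtype=numpy.float32))
    ind = Independent(base, reinterpreted_batch_ndims=1)
    print(ind.batch_shape, ind.event_shape)  # (3,) (4,)
    x = numpy.zeros((3, 4), dtype=numpy.float32)
    print(ind.log_prob(x).shape)  # (3,): summed over the event axis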
chainer-master/chainer/distributions/__init__.py
"""Collection of distribution implementations."""
from chainer.distributions.bernoulli import Bernoulli # NOQA
from chainer.distributions.beta import Beta # NOQA
from chainer.distributions.categorical import Categorical # NOQA
from chainer.distributions.cauchy import Cauchy # NOQA
from chainer.distributions.chisquare import Chisquare # NOQA
from chainer.distributions.dirichlet import Dirichlet # NOQA
from chainer.distributions.exponential import Exponential # NOQA
from chainer.distributions.gamma import Gamma # NOQA
from chainer.distributions.geometric import Geometric # NOQA
from chainer.distributions.gumbel import Gumbel # NOQA
from chainer.distributions.independent import Independent # NOQA
from chainer.distributions.laplace import Laplace # NOQA
from chainer.distributions.log_normal import LogNormal # NOQA
from chainer.distributions.multivariate_normal import MultivariateNormal # NOQA
from chainer.distributions.normal import Normal # NOQA
from chainer.distributions.one_hot_categorical import OneHotCategorical # NOQA
from chainer.distributions.pareto import Pareto # NOQA
from chainer.distributions.poisson import Poisson # NOQA
from chainer.distributions.uniform import Uniform # NOQA
chainer-master/chainer/distributions/gamma.py
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.array import broadcast
from chainer.functions.array import where
from chainer.functions.math import digamma
from chainer.functions.math import exponential
from chainer.functions.math import lgamma
from chainer.utils import cache
class Gamma(distribution.Distribution):
"""Gamma Distribution.
Args:
k(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution.
theta(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution.
"""
def __init__(self, k, theta):
super(Gamma, self).__init__()
self.__k = k
self.__theta = theta
@cache.cached_property
def k(self):
return chainer.as_variable(self.__k)
@cache.cached_property
def theta(self):
return chainer.as_variable(self.__theta)
@property
def batch_shape(self):
return self.k.shape
@cache.cached_property
def entropy(self):
return (
self.k
+ exponential.log(self.theta)
+ lgamma.lgamma(self.k)
+ (1 - self.k) * digamma.digamma(self.k))
@property
def event_shape(self):
return ()
@property
def _is_gpu(self):
return isinstance(self.k.data, cuda.ndarray)
def log_prob(self, x):
logp = (
- lgamma.lgamma(self.k)
- self.k * exponential.log(self.theta)
+ (self.k - 1) * exponential.log(x)
- x / self.theta)
xp = logp.xp
inf = xp.full_like(logp.array, xp.inf)
if isinstance(x, chainer.Variable):
x = x.array
return where.where(xp.asarray(x >= 0), logp, xp.asarray(-inf))
@cache.cached_property
def mean(self):
return self.k * self.theta
@property
def params(self):
return {'k': self.k, 'theta': self.theta}
def sample_n(self, n):
xp = chainer.backend.get_array_module(self.k)
if xp is cuda.cupy:
eps = xp.random.gamma(
self.k.data, size=(n,) + self.batch_shape, dtype=self.k.dtype)
else:
eps = xp.random.gamma(
self.k.data, size=(n,) + self.batch_shape).astype(self.k.dtype)
noise = broadcast.broadcast_to(self.theta, eps.shape) * eps
return noise
@property
def support(self):
return 'positive'
@cache.cached_property
def variance(self):
return self.mean * self.theta
@distribution.register_kl(Gamma, Gamma)
def _kl_gamma_gamma(dist1, dist2):
return (
(dist1.k - dist2.k) * digamma.digamma(dist1.k)
- (lgamma.lgamma(dist1.k) - lgamma.lgamma(dist2.k))
+ dist2.k * (exponential.log(dist2.theta)
- exponential.log(dist1.theta))
+ dist1.k * (dist1.theta / dist2.theta - 1))
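# --- Editor's usage sketch (not part of the original chainer file). The
# shape and scale values are assumptions for illustration.
if __name__ == '__main__':
    import numpy
    dist = Gamma(k=numpy.array([2.], dtype=numpy.float32),
                 theta=numpy.array([3.], dtype=numpy.float32))
    print(dist.mean)               # k * theta = 6
    print(dist.variance)           # k * theta ** 2 = 18
    print(dist.sample_n(5).shape)  # (5, 1)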
chainer-master/chainer/distributions/exponential.py
import chainer
from chainer.backends import cuda
from chainer import distribution
from chainer.functions.array import where
from chainer.functions.math import exponential
from chainer.functions.math import exponential_m1
from chainer.functions.math import logarithm_1p
from chainer.utils import cache
class Exponential(distribution.Distribution):
"""Exponential Distribution.
The probability density function of the distribution is expressed as
.. math::
p(x;\\lambda) = \\lambda e^{-\\lambda x}
Args:
lam(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution :math:`\\lambda`.
"""
def __init__(self, lam):
super(Exponential, self).__init__()
self.__lam = lam
@cache.cached_property
def lam(self):
return chainer.as_variable(self.__lam)
@cache.cached_property
def _log_lam(self):
return exponential.log(self.lam)
@property
def batch_shape(self):
return self.lam.shape
def cdf(self, x):
return - exponential_m1.expm1(-self.lam * x)
@cache.cached_property
def entropy(self):
return 1 - self._log_lam
@property
def event_shape(self):
return ()
def icdf(self, x):
x = chainer.as_variable(x)
return -1 / self.lam * logarithm_1p.log1p(-x)
@property
def _is_gpu(self):
return isinstance(self.lam.data, cuda.ndarray)
def log_prob(self, x):
logp = self._log_lam - self.lam * x
xp = logp.xp
if isinstance(x, chainer.Variable):
x = x.array
inf = xp.full_like(logp.array, xp.inf)
return where.where(xp.asarray(x >= 0), logp, xp.asarray(-inf))
@cache.cached_property
def mean(self):
return 1 / self.lam
@property
def params(self):
return {'lam': self.lam}
def sample_n(self, n):
xp = chainer.backend.get_array_module(self.lam)
if xp is cuda.cupy:
eps = xp.random.standard_exponential(
(n,)+self.lam.shape, dtype=self.lam.dtype)
else:
eps = xp.random.standard_exponential(
(n,)+self.lam.shape).astype(self.lam.dtype)
noise = eps / self.lam
return noise
@property
def support(self):
return 'positive'
@cache.cached_property
def variance(self):
return self.mean ** 2
@distribution.register_kl(Exponential, Exponential)
def _kl_exponential_exponential(dist1, dist2):
return (
dist1._log_lam
- dist2._log_lam
+ dist2.lam / dist1.lam
- 1.)
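# --- Editor's usage sketch (not part of the original chainer file),
# demonstrating the cdf/icdf round trip with an assumed rate.
if __name__ == '__main__':
    import numpy
    dist = Exponential(numpy.array([0.5], dtype=numpy.float32))
    print(dist.mean)    # 1 / lam = 2
    u = numpy.array([0.25], dtype=numpy.float32)
    x = dist.icdf(u)    # -log(1 - u) / lam
    print(dist.cdf(x))  # recovers u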
chainer-master/chainer/distributions/bernoulli.py
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import distribution
import chainer.distributions.utils
from chainer.functions.activation import sigmoid
from chainer.functions.array import where
from chainer.functions.math import exponential
from chainer.functions.math import logarithm_1p
from chainer.functions.math import sum
from chainer import utils
from chainer.utils import cache
class BernoulliLogProb(chainer.function_node.FunctionNode):
def __init__(self, binary_check=False):
super(BernoulliLogProb, self).__init__()
self.binary_check = binary_check
def forward(self, inputs):
logit, x = inputs
self.retain_inputs((0, 1))
xp = backend.get_array_module(x)
y = logit * (x - 1) - xp.log(xp.exp(-logit) + 1)
y = utils.force_array(y)
# extreme logit
logit_isinf = xp.isinf(logit)
self.logit_ispinf = xp.bitwise_and(logit_isinf, logit > 0)
self.logit_isminf = xp.bitwise_and(logit_isinf, logit <= 0)
with numpy.errstate(divide='ignore', invalid='raise'):
y = xp.where(self.logit_ispinf, xp.log(x), y)
y = xp.where(self.logit_isminf, xp.log(1 - x), y)
if self.binary_check:
self.invalid = utils.force_array(xp.bitwise_and(x != 0, x != 1))
y[self.invalid] = -xp.inf
return utils.force_array(y, logit.dtype),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
logit, x = self.get_retained_inputs()
xp = backend.get_array_module(x)
dlogit = x - 1. / (1. + exponential.exp(-logit))
# extreme logit
nan = xp.array(xp.nan).astype(dlogit.dtype)
logit_isinf = xp.bitwise_or(self.logit_ispinf, self.logit_isminf)
dlogit = where.where(logit_isinf, nan, dlogit)
if self.binary_check:
dlogit = where.where(self.invalid, nan, dlogit)
return sum.sum_to(gy * dlogit, logit.shape), None
def _bernoulli_log_prob(logit, x, binary_check=False):
y, = BernoulliLogProb(binary_check).apply((logit, x))
return y
class Bernoulli(distribution.Distribution):
"""Bernoulli Distribution.
The probability mass function of the distribution is expressed as
.. math::
P(x = 1; p) = p \\\\
P(x = 0; p) = 1 - p
Args:
p(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing :math:`p`. Either `p` or `logit` (not
both) must have a value.
        logit(:class:`~chainer.Variable` or :ref:`ndarray`): Parameter of
distribution representing :math:`\\log\\{p/(1-p)\\}`. Either `p`
or `logit` (not both) must have a value.
"""
def __init__(self, p=None, logit=None, binary_check=False):
super(Bernoulli, self).__init__()
if not (p is None) ^ (logit is None):
raise ValueError(
'Either `p` or `logit` (not both) must have a value.')
self.__p = p
self.__logit = logit
self.binary_check = binary_check
@cache.cached_property
def p(self):
if self.__p is not None:
return chainer.as_variable(self.__p)
else:
return sigmoid.sigmoid(self.logit)
@cache.cached_property
def logit(self):
if self.__logit is not None:
return chainer.as_variable(self.__logit)
else:
return exponential.log(self.p) - logarithm_1p.log1p(-self.p)
@property
def batch_shape(self):
return self.p.shape
@property
def entropy(self):
p = self.p
q = p.dtype.type(1.) - p
return (- chainer.distributions.utils._modified_xlogx(p)
- chainer.distributions.utils._modified_xlogx(q))
@property
def event_shape(self):
return ()
@property
def _is_gpu(self):
return isinstance(self.p.array, cuda.ndarray)
def log_prob(self, x):
return _bernoulli_log_prob(self.logit, x, self.binary_check)
@cache.cached_property
def mean(self):
return self.p
@property
def params(self):
return {'logit': self.logit}
def prob(self, x):
x = chainer.as_variable(x)
prob = x * self.p + (1 - x) * (1 - self.p)
if self.binary_check:
if self._is_gpu:
valid = cuda.cupy.bitwise_or(x.array == 0, x.array == 1)
else:
valid = numpy.bitwise_or(x.array == 0, x.array == 1)
prob *= valid
return prob
def sample_n(self, n):
if self._is_gpu:
eps = cuda.cupy.random.binomial(
1, self.p.array, size=(n,)+self.p.shape)
else:
eps = numpy.random.binomial(
1, self.p.array, size=(n,)+self.p.shape)
return chainer.Variable(eps)
@cache.cached_property
def stddev(self):
return self.variance ** 0.5
@property
def support(self):
return '{0, 1}'
@cache.cached_property
def variance(self):
return self.p * (1 - self.p)
@distribution.register_kl(Bernoulli, Bernoulli)
def _kl_bernoulli_bernoulli(dist1, dist2):
return (
(dist1.logit - dist2.logit) * (dist1.p - 1.)
- exponential.log(exponential.exp(-dist1.logit) + 1)
+ exponential.log(exponential.exp(-dist2.logit) + 1))
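# --- Editor's usage sketch (not part of the original chainer file). The
# probabilities are assumptions for illustration.
if __name__ == '__main__':
    dist = Bernoulli(p=numpy.array([0.25, 0.75], dtype=numpy.float32))
    x = numpy.array([1., 0.], dtype=numpy.float32)
    print(dist.log_prob(x))  # log 0.25 for both entries
    print(dist.variance)     # p * (1 - p)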
chainer-master/chainer/dataset/dataset_mixin.py
import numpy
import six
class DatasetMixin(object):
"""Default implementation of dataset indexing.
DatasetMixin provides the :meth:`__getitem__` operator. The default
implementation uses :meth:`get_example` to extract each example, and
combines the results into a list. This mixin makes it easy to implement a
new dataset that does not support efficient slicing.
    A dataset implementation using DatasetMixin still has to provide the
:meth:`__len__` operator explicitly.
"""
def __getitem__(self, index):
"""Returns an example or a sequence of examples.
It implements the standard Python indexing and one-dimensional integer
array indexing. It uses the :meth:`get_example` method by default, but
it may be overridden by the implementation to, for example, improve the
slicing performance.
Args:
index (int, slice, list or numpy.ndarray): An index of an example
or indexes of examples.
Returns:
If index is int, returns an example created by `get_example`.
If index is either slice or one-dimensional list or numpy.ndarray,
returns a list of examples created by `get_example`.
.. admonition:: Example
>>> import numpy
>>> from chainer import dataset
>>> class SimpleDataset(dataset.DatasetMixin):
... def __init__(self, values):
... self.values = values
... def __len__(self):
... return len(self.values)
... def get_example(self, i):
... return self.values[i]
...
>>> ds = SimpleDataset([0, 1, 2, 3, 4, 5])
>>> ds[1] # Access by int
1
>>> ds[1:3] # Access by slice
[1, 2]
>>> ds[[4, 0]] # Access by one-dimensional integer list
[4, 0]
>>> index = numpy.arange(3)
>>> ds[index] # Access by one-dimensional integer numpy.ndarray
[0, 1, 2]
"""
if isinstance(index, slice):
current, stop, step = index.indices(len(self))
return [self.get_example(i) for i in
six.moves.range(current, stop, step)]
elif isinstance(index, list) or isinstance(index, numpy.ndarray):
return [self.get_example(i) for i in index]
else:
return self.get_example(index)
def __len__(self):
"""Returns the number of data points."""
raise NotImplementedError
def get_example(self, i):
"""Returns the i-th example.
Implementations should override it. It should raise :class:`IndexError`
if the index is invalid.
Args:
i (int): The index of the example.
Returns:
The i-th example.
"""
raise NotImplementedError
chainer-master/chainer/dataset/download.py
import hashlib
import os
import shutil
import sys
import filelock
from six.moves.urllib import request
from chainer import utils
_dataset_root = os.environ.get(
'CHAINER_DATASET_ROOT',
os.path.join(os.path.expanduser('~'), '.chainer', 'dataset'))
def get_dataset_root():
"""Gets the path to the root directory to download and cache datasets.
Returns:
str: The path to the dataset root directory.
"""
return _dataset_root
def set_dataset_root(path):
"""Sets the root directory to download and cache datasets.
There are two ways to set the dataset root directory. One is by setting the
environment variable ``CHAINER_DATASET_ROOT``. The other is by using this
    function. If both are specified, the one specified via this function is used.
The default dataset root is ``$HOME/.chainer/dataset``.
Args:
path (str): Path to the new dataset root directory.
"""
global _dataset_root
_dataset_root = path
def get_dataset_directory(dataset_name, create_directory=True):
"""Gets the path to the directory of given dataset.
The generated path is just a concatenation of the global root directory
(see :func:`set_dataset_root` for how to change it) and the dataset name.
The dataset name can contain slashes, which are treated as path separators.
Args:
dataset_name (str): Name of the dataset.
create_directory (bool): If True (default), this function also creates
the directory at the first time. If the directory already exists,
then this option is ignored.
Returns:
str: Path to the dataset directory.
"""
path = os.path.join(_dataset_root, dataset_name)
if create_directory:
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
return path
def cached_download(url):
"""Downloads a file and caches it.
It downloads a file from the URL if there is no corresponding cache. After
the download, this function stores a cache to the directory under the
dataset root (see :func:`set_dataset_root`). If there is already a cache
for the given URL, it just returns the path to the cache without
downloading the same file.
.. note::
This function raises :class:`OSError` when it fails to create
        the cache directory. In older versions, it raised :class:`RuntimeError`.
Args:
url (str): URL to download from.
Returns:
str: Path to the downloaded file.
"""
cache_root = os.path.join(_dataset_root, '_dl_cache')
try:
os.makedirs(cache_root)
except OSError:
if not os.path.isdir(cache_root):
raise
lock_path = os.path.join(cache_root, '_dl_lock')
urlhash = hashlib.md5(url.encode('utf-8')).hexdigest()
cache_path = os.path.join(cache_root, urlhash)
with filelock.FileLock(lock_path):
if os.path.exists(cache_path):
return cache_path
with utils.tempdir(dir=cache_root) as temp_root:
temp_path = os.path.join(temp_root, 'dl')
sys.stderr.write('Downloading from {}...\n'.format(url))
sys.stderr.flush()
request.urlretrieve(url, temp_path)
with filelock.FileLock(lock_path):
shutil.move(temp_path, cache_path)
return cache_path
def cache_or_load_file(path, creator, loader):
"""Caches a file if it does not exist, or loads it otherwise.
This is a utility function used in dataset loading routines. The
    ``creator`` creates the file at the given path, and returns the content. If the
file already exists, the ``loader`` is called instead, and it loads the
file and returns the content.
    Note that the path passed to the creator is a temporary one, and is not the
    same as the path given to this function. This function safely renames the file
created by the creator to a given path, even if this function is called
simultaneously by multiple threads or processes.
Args:
path (str): Path to save the cached file.
        creator: Function to create the file and return the content. It takes
            a path to a temporary place as the argument. Before calling the
            creator, there is no file at the temporary path.
        loader: Function to load the cached file and return the content.
Returns:
        It returns the value returned by the creator or the loader.
"""
if os.path.exists(path):
return loader(path)
try:
os.makedirs(_dataset_root)
except OSError:
if not os.path.isdir(_dataset_root):
raise RuntimeError('cannot create dataset directory')
lock_path = os.path.join(_dataset_root, '_create_lock')
with utils.tempdir() as temp_dir:
file_name = os.path.basename(path)
temp_path = os.path.join(temp_dir, file_name)
content = creator(temp_path)
with filelock.FileLock(lock_path):
if not os.path.exists(path):
shutil.move(temp_path, path)
return content
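# --- Editor's usage sketch (not part of the original chainer file).
# 'example/demo' and the trivial creator/loader are placeholders illustrating
# the cache_or_load_file protocol: the creator writes to a temporary path and
# returns the content; the loader reads an existing cache.
if __name__ == '__main__':
    path = os.path.join(get_dataset_directory('example/demo'), 'data.txt')

    def creator(temp_path):
        with open(temp_path, 'w') as f:
            f.write('hello')
        return 'hello'

    def loader(cached_path):
        with open(cached_path) as f:
            return f.read()

    print(cache_or_load_file(path, creator, loader))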
chainer-master/chainer/dataset/iterator.py
class Iterator(object):
"""Base class of all dataset iterators.
Iterator iterates over the dataset, yielding a minibatch at each
    iteration. A minibatch is a list of examples. Each implementation should
implement an iterator protocol (e.g., the :meth:`__next__` method).
Note that, even if the iterator supports setting the batch size, it does
not guarantee that each batch always contains the same number of examples.
    For example, if you let the iterator stop at the end of the sweep, the
    last batch may contain fewer examples.
The interface between the iterator and the underlying dataset is not fixed,
    and is up to the implementation.
    Each implementation should provide the following attributes (they are not
    required to be writable).
- ``batch_size``: Number of examples within each minibatch.
- ``epoch``: Number of completed sweeps over the dataset.
- ``epoch_detail``: Floating point number version of the epoch. For
example, if the iterator is at the middle of the dataset at the third
epoch, then this value is 2.5.
- ``previous_epoch_detail``: The value of ``epoch_detail`` at the previous
iteration. This value is ``None`` before the first iteration.
- ``is_new_epoch``: ``True`` if the epoch count was incremented at the last
update.
Each implementation should also support serialization to resume/suspend the
iteration.
"""
def __del__(self):
self.finalize()
def __iter__(self):
"""Returns self."""
return self
def __next__(self):
"""Returns the next batch.
This is a part of the iterator protocol of Python. It may raise the
:class:`StopIteration` exception when it stops the iteration.
"""
raise NotImplementedError
def next(self):
"""Python2 alternative of ``__next__``.
It calls :meth:`__next__` by default.
"""
return self.__next__()
def finalize(self):
"""Finalizes the iterator and possibly releases the resources.
This method does nothing by default. Implementation may override it to
better handle the internal resources.
This method can be called multiple times.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.finalize()
def serialize(self, serializer):
"""Serializes the internal state of the iterator.
This is a method to support the serializer protocol of Chainer.
.. note::
It should only serialize the internal state that changes over the
iteration. It should not serialize what is set manually by
users such as the batch size.
"""
pass
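# --- Editor's sketch (not part of the original chainer file): a minimal
# concrete iterator over an in-memory list, implementing the protocol and the
# attributes documented above (previous_epoch_detail is omitted for brevity).
if __name__ == '__main__':
    class ListIterator(Iterator):
        def __init__(self, dataset, batch_size):
            self.dataset = dataset
            self.batch_size = batch_size
            self.epoch = 0
            self.is_new_epoch = False
            self._i = 0

        @property
        def epoch_detail(self):
            return self.epoch + self._i / float(len(self.dataset))

        def __next__(self):
            n = len(self.dataset)
            batch = [self.dataset[(self._i + k) % n]
                     for k in range(self.batch_size)]
            self._i += self.batch_size
            self.is_new_epoch = self._i >= n
            if self.is_new_epoch:
                self.epoch += 1
                self._i -= n
            return batch

    it = ListIterator(list(range(5)), batch_size=2)
    print(next(it), next(it), next(it))  # [0, 1] [2, 3] [4, 0]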
chainer-master/chainer/dataset/convert.py
import collections
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
class Converter(object):
"""Base class of converters.
Converters receive batched data retrieved from iterators and perform
arbitrary transforms as well as device transfer.
Implementation should override the ``__call__`` method.
.. seealso::
:meth:`chainer.dataset.converter` --- a decorator to turn a converter
function into a ``Converter`` instance.
"""
def __call__(self, batch, device):
"""Performs conversion.
Args:
batch:
A batch. The type and value are arbitrary, depending on usage.
device(~chainer.backend.Device):
Device to which the converter is expected to send the batch.
Returns: A converted batch.
"""
raise NotImplementedError(
'Concrete class must implement __call__.')
class _ArbitraryCallableConverter(Converter):
"""Converter to wrap a callable with arbitrary arguments.
    This class accepts arbitrary arguments and passes them through to the
    underlying callable, with the device argument normalized.
"""
def __init__(self, base_callable):
if not callable(base_callable):
raise TypeError(
'Can only wrap a callable. Actual: {}'.format(
type(base_callable)))
self.base_callable = base_callable
def __call__(self, *args, **kwargs):
base_callable = self.base_callable
# Normalize the 'device' argument
if len(args) >= 2:
# specified as a positional argument
args = list(args)
args[1] = _get_device(args[1])
elif 'device' in kwargs:
kwargs['device'] = _get_device(kwargs['device'])
return base_callable(*args, **kwargs)
def converter():
"""Decorator to make a converter.
This decorator turns a converter function into a
:class:`chainer.dataset.Converter` class instance, which also is a
callable.
This is required to use the converter function from an old module that
does not support :class:`chainer.backend.Device` instances
(See the **Device argument conversion** section below).
.. rubric:: Requirements of the target function
The target converter function must accept two positional arguments:
a batch and a device, and return a converted batch.
The type of the device argument is :class:`chainer.backend.Device`.
The types and values of the batches (the first argument and the return
value) are not specified: they depend on how the converter is used (e.g.
by updaters).
.. admonition:: Example
>>> @chainer.dataset.converter()
... def custom_converter(batch, device):
... assert isinstance(device, chainer.backend.Device)
... # do something with batch...
... return device.send(batch)
.. rubric:: Device argument conversion
For backward compatibility, the decorator wraps
the function so that if the converter is called with the device argument
with ``int`` type, it is converted to a :class:`chainer.backend.Device`
instance before calling the original function. The ``int`` value indicates
the CUDA device of the cupy backend.
Without the decorator, the converter cannot support ChainerX devices.
If the batch were requested to be converted to ChainerX with such
converters, :class:`RuntimeError` will be raised.
.. note::
        Converters using this decorator can't be pickled,
        causing :class:`chainer.training.updaters.MultiprocessParallelUpdater`
        to fail when the multiprocessing start mode is set to ``'spawn'`` or
        ``'forkserver'``. Should you need to use such a feature, please rely
        on class-style converters.
"""
def wrap(func):
return _ArbitraryCallableConverter(func)
return wrap
def _call_converter(converter, batch, device):
# Calls the converter.
# Converter can be either new-style (accepts chainer.backend.Device) or
# old-style (accepts int as device).
assert device is None or isinstance(device, backend.Device)
if isinstance(converter, Converter):
# New-style converter
return converter(batch, device)
# Old-style converter
if device is None:
return converter(batch, None)
if device.xp is numpy:
return converter(batch, -1)
if device.xp is cuda.cupy:
return converter(batch, device.device.id)
raise RuntimeError(
'Converter does not support ChainerX. '
'Use chainer.dataset.converter decorator.')
def to_device(device, x):
"""Send an array to a given device.
This method sends a given array to a given device. This method is used in
:func:`~chainer.dataset.concat_examples`.
You can also use this method in a custom converter method used in
:class:`~chainer.training.Updater` and :class:`~chainer.training.Extension`
such as :class:`~chainer.training.updaters.StandardUpdater` and
:class:`~chainer.training.extensions.Evaluator`.
See also :func:`chainer.dataset.concat_examples`.
Args:
device (None or int or device specifier): A device to which an array
is sent. If it is a negative integer, an array is sent to CPU.
If it is a positive integer, an array is sent to GPU with the
        given ID. If it is ``None``, an array is left in the original
device. Also, any of device specifiers described at
:class:`~chainer.backend.DeviceId` is accepted.
x (:ref:`ndarray`): An array to send.
Returns:
Converted array.
"""
device = _get_device(device)
if device is None:
return x
return device.send(x)
def _get_device(device_spec):
    # Converts a device specifier to a chainer.Device instance.
    # In addition to what chainer.get_device accepts, this function
    # supports None.
if device_spec is None:
return None
return backend.get_device(device_spec)
# TODO(hvy): Write unit tests where batch elements contain Python lists.
def concat_examples_func(batch, device=None, padding=None):
"""Concatenates a list of examples into array(s).
This function converts an "array of tuples" into a "tuple of arrays".
Specifically, given a list of examples each of which consists of
a list of elements, this function first makes an array
by taking the element in the same position from each example
and concatenates them along the newly-inserted first axis
(called `batch dimension`) into one array.
It repeats this for all positions and returns the resulting arrays.
The output type depends on the type of examples in ``batch``.
For instance, consider each example consists of two arrays ``(x, y)``.
Then, this function concatenates ``x`` 's into one array, and ``y`` 's
into another array, and returns a tuple of these two arrays. Another
example: consider each example is a dictionary of two entries whose keys
are ``'x'`` and ``'y'``, respectively, and values are arrays. Then, this
function concatenates ``x`` 's into one array, and ``y`` 's into another
array, and returns a dictionary with two entries ``x`` and ``y`` whose
values are the concatenated arrays.
When the arrays to concatenate have different shapes, the behavior depends
on the ``padding`` value. If ``padding`` is ``None`` (default), it raises
    an error. Otherwise, it builds an array of the minimum shape into which
    the contents of all arrays fit. The padding value is then
    used for the extra elements of the resulting arrays.
.. admonition:: Example
>>> import numpy as np
>>> from chainer import dataset
>>> x = [([1, 2], 1),
... ([3, 4], 2),
... ([5, 6], 3)]
>>> dataset.concat_examples(x)
(array([[1, 2],
[3, 4],
[5, 6]]), array([1, 2, 3]))
>>>
>>> y = [(np.array([1, 2]), 0),
... (np.array([3]), 1),
... (np.array([]), 2)]
>>> dataset.concat_examples(y, padding=100)
(array([[ 1, 2],
[ 3, 100],
[100, 100]]), array([0, 1, 2]))
>>>
>>> z = [(np.array([1, 2]), np.array([0])),
... (np.array([3]), np.array([])),
... (np.array([]), np.array([2]))]
>>> dataset.concat_examples(z, padding=(100, 200))
(array([[ 1, 2],
[ 3, 100],
[100, 100]]), array([[ 0],
[200],
[ 2]]))
>>> w = [{'feature': np.array([1, 2]), 'label': 0},
... {'feature': np.array([3, 4]), 'label': 1},
... {'feature': np.array([5, 6]), 'label': 2}]
>>> dataset.concat_examples(w) # doctest: +SKIP
{'feature': array([[1, 2],
[3, 4],
[5, 6]]), 'label': array([0, 1, 2])}
Args:
batch (list): A list of examples. This is typically given by a dataset
iterator.
device (device specifier): A device to which each array is sent.
If it is omitted, all arrays are left in their original devices.
See :meth:`~chainer.dataset.convert.to_device` for more details.
padding: Scalar value for extra elements. If this is None (default),
an error is raised on shape mismatch. Otherwise, an array of
minimum dimensionalities that can accommodate all arrays is
created, and elements outside of the examples are padded by this
value.
Returns:
Array, a tuple of arrays, or a dictionary of arrays. The type depends
on the type of each example in the batch.
"""
assert device is None or isinstance(device, backend.Device)
if not batch:
raise ValueError('batch is empty')
first_elem = batch[0]
if isinstance(first_elem, tuple):
result = []
if not isinstance(padding, tuple):
padding = [padding] * len(first_elem)
for i in six.moves.range(len(first_elem)):
result.append(to_device(device, _concat_arrays(
[example[i] for example in batch], padding[i])))
return tuple(result)
elif isinstance(first_elem, dict):
result = {}
if not isinstance(padding, dict):
padding = {key: padding for key in first_elem}
for key in first_elem:
result[key] = to_device(device, _concat_arrays(
[example[key] for example in batch], padding[key]))
return result
else:
return to_device(device, _concat_arrays(batch, padding))
def _concat_arrays(arrays, padding):
# Convert `arrays` to numpy.ndarray if `arrays` consists of the built-in
# types such as int, float or list.
if not isinstance(arrays[0], chainer.get_array_types()):
arrays = numpy.asarray(arrays)
if padding is not None:
arr_concat = _concat_arrays_with_padding(arrays, padding)
else:
device = backend.get_device_from_array(arrays[0])
with chainer.using_device(device):
arr_concat = device.xp.concatenate(
[array[None] for array in arrays])
return arr_concat
def _concat_arrays_with_padding(arrays, padding):
shape = numpy.array(arrays[0].shape, dtype=int)
for array in arrays[1:]:
if numpy.any(shape != array.shape):
numpy.maximum(shape, array.shape, shape)
shape = tuple(numpy.insert(shape, 0, len(arrays)))
device = backend.get_device_from_array(arrays[0])
with chainer.using_device(device):
result = device.xp.full(shape, padding, dtype=arrays[0].dtype)
for i in six.moves.range(len(arrays)):
src = arrays[i]
slices = tuple(slice(dim) for dim in src.shape)
result[(i,) + slices] = src
return result
concat_examples = _ArbitraryCallableConverter(concat_examples_func)
class ConcatWithAsyncTransfer(object):
"""Interface to concatenate data and transfer them to GPU asynchronously.
    It enables transferring the next batch of input data to the GPU while the
    GPU is running training kernels on the current batch of input data.
An instance of this class is mainly intended to be used as a converter
function of an updater like below.
.. doctest::
from chainer.dataset import convert
...
updater = chainer.training.updaters.StandardUpdater(
...,
converter=convert.ConcatWithAsyncTransfer(),
...)
Args:
stream (cupy.cuda.Stream): CUDA stream. If ``None``, a stream is
automatically created on the first call. Data transfer operation
is launched asynchronously using the stream.
compute_stream(cupy.cuda.Stream): CUDA stream used for compute kernels.
If not ``None``, CUDA events are created/used to avoid global
synchronization and overlap execution of compute kernels and data
transfers as much as possible. If ``None``, global synchronization
is used instead.
"""
def __init__(self, stream=None, compute_stream=None):
self._stream = stream
self.compute_stream = compute_stream
self._device = None
self._conveyor = collections.defaultdict(
self._get_conveyor)
if compute_stream is not None:
# * event1 prevents a CPU thread to update arrays that might be
# still being used by GPU kernels.
# * event2 prevents a GPU kernel to read arrays that might be
# still being transferred to GPU.
self._event1 = cuda.Event()
self._event2 = cuda.Event()
self._sync_get = False
else:
self._sync_get = True
def _get_conveyor(self):
return Conveyor(self._device, self._stream)
def __call__(self, batch, device=None, padding=None):
"""Concatenate data and transfer them to GPU asynchronously.
See also :func:`chainer.dataset.concat_examples`.
Args:
batch (list): A list of examples.
device (int): Device ID to which each array is sent.
padding: Scalar value for extra elements.
Returns:
Array, a tuple of arrays, or a dictionary of arrays.
The type depends on the type of each example in the batch.
"""
if not batch:
raise ValueError('batch is empty')
first_elem = batch[0]
if not self._conveyor:
self._device = device # device is set at first call
if device is not None and device >= 0 and self._stream is None:
with cuda.get_device_from_id(device):
self._stream = cuda.Stream(non_blocking=True)
        # Compare by equality: ``is`` would rely on int identity semantics.
        if device != self._device:
raise ValueError('device is different')
if self.compute_stream is not None:
self._event1.synchronize()
self._event1.record(stream=self.compute_stream)
with cuda.get_device_from_id(device):
if isinstance(first_elem, tuple):
result = []
if not isinstance(padding, tuple):
padding = [padding] * len(first_elem)
for i in six.moves.range(len(first_elem)):
self._conveyor[i].put(_concat_arrays(
[example[i] for example in batch], padding[i]))
for i in six.moves.range(len(first_elem)):
result.append(self._conveyor[i].get(sync=self._sync_get))
if self.compute_stream is not None:
self._event2.record(stream=self._stream)
self.compute_stream.wait_event(self._event2)
return tuple(result)
elif isinstance(first_elem, dict):
result = {}
if not isinstance(padding, dict):
padding = {key: padding for key in first_elem}
for key in first_elem:
self._conveyor[key].put(_concat_arrays(
[example[key] for example in batch], padding[key]))
for key in first_elem:
result[key] = self._conveyor[key].get(sync=self._sync_get)
if self.compute_stream is not None:
self._event2.record(stream=self._stream)
self.compute_stream.wait_event(self._event2)
return result
else:
return to_device(device, _concat_arrays(batch, padding))
class Conveyor(object):
"""Interface to handle asynchronous data transfer using double buffering.
An asynchronous data transfer is initiated by :meth:`put`, and the result,
the array transferred to a target device, is obtained by :meth:`get`.
You should call :meth:`put` followed by :meth:`get`.
Args:
device (int): Device ID to which an array is sent. Negative value
indicates the host memory (CPU). If it is omitted, the array is
left in the original device. Asynchronous data transfer is used
only when device ID >= 0.
stream (cupy.cuda.Stream): CUDA stream. An array is sent to GPU
asynchronously using this stream. If ``None``, asynchronous data
transfer is not used.
"""
def __init__(self, device=None, stream=None):
self._device = device
self._stream = stream
self._array_set = [[None, None], [None, None]]
self._ret_array = []
def put(self, array):
"""Initiates asynchronous transfer of an array to a target device.
        This method assumes that the input array is a numpy array in host
        memory that is not page-locked. It first copies the data to
        page-locked host memory (so-called pinned memory), then initiates
        asynchronous data transfer to the target device.
        The intermediate arrays on pinned memory and the cupy arrays on the
        target device are retained in self._array_set in order to reduce the
        number of memory allocations/releases, and they are reused for
        subsequent data transfers as long as the sizes are the same.
        A double buffering scheme is used here, so you can safely initiate
        the next data transfer even while the current data is still in use
        on the target device.
"""
if self._device is None or self._device < 0 or self._stream is None:
self._ret_array.append(to_device(self._device, array))
return
pin_array, cp_array = self._array_set.pop(0)
if pin_array is not None:
if pin_array.nbytes != array.nbytes:
pin_array = None
with cuda.get_device_from_id(self._device):
if pin_array is None:
# The global synchronization below is necessary to ensure ALL
# operations including compute and data transfer submitted
# to GPU so far have been completed, in order to avoid possible
# memory corruption due to race condition among operations that
# use different CUDA streams.
# You can also solve this sort of race condition by preparing a
# memory pool for each CUDA stream and using it carefully.
cuda.cupy.cuda.runtime.deviceSynchronize()
pin_mem = cuda.cupy.cuda.alloc_pinned_memory(array.nbytes)
pin_array = numpy.frombuffer(pin_mem,
array.dtype,
array.size
).reshape(array.shape)
cp_array = cuda.cupy.empty(array.shape, array.dtype)
pin_array[...] = array # copy(CPU): paged -> pinned
cp_array.set(pin_array, self._stream) # copy: CPU to GPU
self._array_set.append([pin_array, cp_array])
self._ret_array.append(cp_array)
def get(self, sync=True):
"""Returns the array of data transferred to a target device asynchronously.
        If ``sync`` is ``True``, the data of the returned array is guaranteed
        to be available to GPU kernels. If ``sync`` is ``False``, the data
        might still be in transfer to the GPU, so synchronization must be
        handled carefully by the caller.
        Args:
            sync (bool): If ``True``, global synchronization is used to
                ensure completion of the asynchronous data transfer, which
                is safer. If ``False``, it is assumed that the caller
                handles synchronization correctly, so no global
                synchronization is performed.
"""
if (self._device is not None and self._device >= 0 and
self._stream is not None):
if sync:
cuda.cupy.cuda.runtime.deviceSynchronize()
return self._ret_array.pop(0)
| 21,146 | 37.102703 | 83 | py |
| chainer | chainer-master/chainer/dataset/__init__.py |
# import classes and functions
from chainer.dataset.convert import concat_examples # NOQA
from chainer.dataset.convert import ConcatWithAsyncTransfer # NOQA
from chainer.dataset.convert import converter # NOQA
from chainer.dataset.convert import Converter # NOQA
from chainer.dataset.convert import to_device # NOQA
from chainer.dataset.dataset_mixin import DatasetMixin # NOQA
from chainer.dataset.download import cache_or_load_file # NOQA
from chainer.dataset.download import cached_download # NOQA
from chainer.dataset.download import get_dataset_directory # NOQA
from chainer.dataset.download import get_dataset_root # NOQA
from chainer.dataset.download import set_dataset_root # NOQA
from chainer.dataset.iterator import Iterator # NOQA
from chainer.dataset.tabular.tabular_dataset import TabularDataset # NOQA
| 829 | 54.333333 | 74 | py |
| chainer | chainer-master/chainer/dataset/tabular/_asmode.py |
from chainer.dataset.tabular import tabular_dataset
class _Astuple(tabular_dataset.TabularDataset):
def __init__(self, dataset):
self._dataset = dataset
def __len__(self):
return len(self._dataset)
@property
def keys(self):
return self._dataset.keys
@property
def mode(self):
return tuple
def get_examples(self, indices, key_indices):
return self._dataset.get_examples(indices, key_indices)
def convert(self, data):
return self._dataset.convert(data)
class _Asdict(tabular_dataset.TabularDataset):
def __init__(self, dataset):
self._dataset = dataset
def __len__(self):
return len(self._dataset)
@property
def keys(self):
return self._dataset.keys
@property
def mode(self):
return dict
def get_examples(self, indices, key_indices):
return self._dataset.get_examples(indices, key_indices)
def convert(self, data):
return self._dataset.convert(data)
| 1,024 | 20.354167 | 63 | py |
| chainer | chainer-master/chainer/dataset/tabular/_transform.py |
import six
from chainer.dataset.tabular import tabular_dataset
class _Transform(tabular_dataset.TabularDataset):
def __init__(self, dataset, keys, transform):
if not isinstance(keys, tuple):
keys = keys,
self._dataset = dataset
self._keys = keys
self._transform = transform
def __len__(self):
return len(self._dataset)
@property
def keys(self):
return self._keys
@property
def mode(self):
if not hasattr(self, '_mode'):
self.get_examples([0], None)
return self._mode
def get_examples(self, indices, key_indices):
if key_indices is None:
key_indices = six.moves.range(len(self._keys))
in_examples = self._dataset.get_examples(indices, None)
out_examples = tuple([] for _ in key_indices)
for in_example in six.moves.zip(*in_examples):
if self._dataset.mode is tuple:
out_example = self._transform(*in_example)
elif self._dataset.mode is dict:
out_example = self._transform(
**dict(six.moves.zip(self._dataset.keys, in_example)))
elif self._dataset.mode is None:
out_example = self._transform(*in_example)
if isinstance(out_example, tuple):
if hasattr(self, '_mode') and self._mode is not tuple:
raise ValueError(
'transform must not change its return type')
self._mode = tuple
for col_index, key_index in enumerate(key_indices):
out_examples[col_index].append(out_example[key_index])
elif isinstance(out_example, dict):
if hasattr(self, '_mode') and self._mode is not dict:
raise ValueError(
'transform must not change its return type')
self._mode = dict
for col_index, key_index in enumerate(key_indices):
out_examples[col_index].append(
out_example[self._keys[key_index]])
else:
if hasattr(self, '_mode') and self._mode is not None:
raise ValueError(
'transform must not change its return type')
self._mode = None
out_example = out_example,
for col_index, key_index in enumerate(key_indices):
out_examples[col_index].append(out_example[key_index])
return out_examples
def convert(self, data):
return self._dataset.convert(data)
class _TransformBatch(tabular_dataset.TabularDataset):
def __init__(self, dataset, keys, transform_batch):
if not isinstance(keys, tuple):
keys = keys,
self._dataset = dataset
self._keys = keys
self._transform_batch = transform_batch
def __len__(self):
return len(self._dataset)
@property
def keys(self):
return self._keys
@property
def mode(self):
if not hasattr(self, '_mode'):
self.get_examples([0], None)
return self._mode
def get_examples(self, indices, key_indices):
if indices is None:
len_ = len(self)
elif isinstance(indices, slice):
start, stop, step = indices.indices(len(self))
len_ = len(six.moves.range(start, stop, step))
else:
len_ = len(indices)
if key_indices is None:
key_indices = six.moves.range(len(self._keys))
in_examples = self._dataset.get_examples(indices, None)
if self._dataset.mode is tuple:
out_examples = self._transform_batch(*in_examples)
elif self._dataset.mode is dict:
out_examples = self._transform_batch(
**dict(six.moves.zip(self._dataset.keys, in_examples)))
elif self._dataset.mode is None:
out_examples = self._transform_batch(*in_examples)
if isinstance(out_examples, tuple):
if hasattr(self, '_mode') and self._mode is not tuple:
raise ValueError(
'transform_batch must not change its return type')
self._mode = tuple
if not all(len(col) == len_ for col in out_examples):
raise ValueError(
'transform_batch must not change the length of data')
return tuple(out_examples[key_index]
for key_index in key_indices)
elif isinstance(out_examples, dict):
if hasattr(self, '_mode') and self._mode is not dict:
raise ValueError(
'transform_batch must not change its return type')
self._mode = dict
if not all(len(col) == len_ for col in out_examples.values()):
raise ValueError(
'transform_batch must not change the length of data')
return tuple(out_examples[self._keys[key_index]]
for key_index in key_indices)
else:
if hasattr(self, '_mode') and self._mode is not None:
raise ValueError(
'transform_batch must not change its return type')
self._mode = None
out_examples = out_examples,
if not all(len(col) == len_ for col in out_examples):
raise ValueError(
'transform_batch must not change the length of data')
return tuple(out_examples[key_index]
for key_index in key_indices)
def convert(self, data):
return self._dataset.convert(data)
| 5,703 | 36.038961 | 74 | py |
| chainer | chainer-master/chainer/dataset/tabular/_with_converter.py |
from chainer.dataset.tabular import tabular_dataset
class _WithConverter(tabular_dataset.TabularDataset):
def __init__(self, dataset, converter):
self._dataset = dataset
self._converter = converter
def __len__(self):
return len(self._dataset)
@property
def keys(self):
return self._dataset.keys
@property
def mode(self):
return self._dataset.mode
def get_examples(self, indices, key_indices):
return self._dataset.get_examples(indices, key_indices)
def convert(self, data):
if isinstance(data, tuple):
return self._converter(*data)
elif isinstance(data, dict):
return self._converter(**data)
else:
return self._converter(data)
| 775 | 24.032258 | 63 | py |
| chainer | chainer-master/chainer/dataset/tabular/from_data.py |
import chainer
from chainer.dataset.tabular import tabular_dataset
def from_data(data, *, size=None):
"""Create a TabularDataset from lists/arrays/callables.
>>> from chainer.dataset import tabular
>>>
>>> dataset = tabular.from_data([0, 1, 2])
>>> dataset[0]
0
>>> dataset = tabular.from_data(([0, 1, 2], [3, 4, 5]))
>>> dataset[0]
(0, 3)
>>> dataset = tabular.from_data((('a', [0, 1, 2]), ('b', [3, 4, 5])))
>>> dataset.keys
('a', 'b')
>>> dataset[0]
(0, 3)
>>> dataset = tabular.from_data({'a': [0, 1, 2], 'b': [3, 4, 5]})
>>> sorted(dataset[0].items())
[('a', 0), ('b', 3)]
>>> dataset = tabular.from_data(('a', lambda i: i * i), size=10)
>>> dataset[5]
25
Args:
data (list, array, tuple, or dict): Data in following format.
- `list/array`
- `(str, list/array/callable)`
- `((str, ...), callable)`
- `((list/array)/(str, list/array/callable) \
/((key, ...), callable), ...)`
- `{str: (list/array/callable)/(str, ...): callable, ...}`
size (int): The length of the dataset.
This argument is required \
when no lists/arrays exist in :obj:`data`.
Return:
A :class:`~chainer.dataset.TabularDataset`.
"""
if isinstance(data, tuple):
if len(data) == 2:
key, d = data
if isinstance(key, str):
return _make_dataset(key, d, size)
if isinstance(key, tuple) and all(isinstance(k, str) for k in key):
return _make_dataset(key, d, size)
for d in data:
if isinstance(d, tuple):
_, d = d
if size is None:
try:
size = len(d)
except TypeError:
pass
datasets = []
for d in data:
if isinstance(d, tuple):
key, d = d
else:
key = None
datasets.append(_make_dataset(key, d, size))
return datasets[0].join(*datasets[1:]).astuple()
elif isinstance(data, dict):
for d in data.values():
if size is None:
try:
size = len(d)
except TypeError:
pass
datasets = []
for key, d in data.items():
datasets.append(_make_dataset(key, d, size))
return datasets[0].join(*datasets[1:]).asdict()
else:
return _make_dataset(None, data, size)
def _make_dataset(key, data, size):
if isinstance(data, chainer.get_array_types()):
if key is None:
key = '_{}'.format(id(data))
return _Array(key, data)
elif isinstance(data, list):
if key is None:
key = '_{}'.format(id(data))
return _List(key, data)
elif callable(data):
if key is None:
raise ValueError('key(s) must be specified for callable')
if size is None:
raise ValueError('size must be specified for callable')
return _Index(size).transform(key, data)
class _Array(tabular_dataset.TabularDataset):
def __init__(self, key, data):
self._key = key
self._data = data
def __len__(self):
return len(self._data)
@property
def keys(self):
return self._key,
@property
def mode(self):
return None
def get_examples(self, indices, key_indices):
if key_indices is None:
key_indices = 0,
if indices is None:
return (self._data,) * len(key_indices)
else:
return (self._data[indices],) * len(key_indices)
class _List(tabular_dataset.TabularDataset):
def __init__(self, key, data):
self._key = key
self._data = data
def __len__(self):
return len(self._data)
@property
def keys(self):
return self._key,
@property
def mode(self):
return None
def get_examples(self, indices, key_indices):
if key_indices is None:
key_indices = 0,
if indices is None:
return (self._data,) * len(key_indices)
elif isinstance(indices, slice):
return (self._data[indices],) * len(key_indices)
else:
return ([self._data[index] for index in indices],) \
* len(key_indices)
class _Index(tabular_dataset.TabularDataset):
def __init__(self, size):
self._len = size
def __len__(self):
return self._len
@property
def keys(self):
return 'index',
@property
def mode(self):
return None
def get_examples(self, indices, key_indices):
if indices is None:
indices = slice(None)
if isinstance(indices, slice):
start, stop, step = indices.indices(len(self))
indices = list(range(start, stop, step))
if key_indices is None:
key_indices = 0,
return (indices,) * len(key_indices)
| 5,074 | 25.570681 | 79 | py |
| chainer | chainer-master/chainer/dataset/tabular/_join.py |
import six
from chainer.dataset.tabular import tabular_dataset
class _Join(tabular_dataset.TabularDataset):
def __init__(self, *datasets):
keys = set(datasets[0].keys)
for dataset in datasets[1:]:
if not len(dataset) == len(datasets[0]):
raise ValueError('All datasets must have the same length')
if len(keys.intersection(dataset.keys)) > 0:
raise ValueError('All keys must be unique among all datasets')
keys = keys.union(dataset.keys)
self._datasets = datasets
def __len__(self):
return len(self._datasets[0])
@property
def keys(self):
return tuple(key for dataset in self._datasets for key in dataset.keys)
@property
def mode(self):
for dataset in self._datasets:
if dataset.mode:
return dataset.mode
return tuple
def get_examples(self, indices, key_indices):
if key_indices is None:
return tuple(
col
for dataset in self._datasets
for col in dataset.get_examples(indices, None))
examples = {}
key_offset = 0
for dataset in self._datasets:
sub_key_indices = []
for key_index in key_indices:
sub_key_index = key_index - key_offset
if sub_key_index < 0 or len(dataset.keys) <= sub_key_index:
continue
if sub_key_index not in sub_key_indices:
sub_key_indices.append(sub_key_index)
if len(sub_key_indices) > 0:
sub_key_indices = tuple(sub_key_indices)
sub_examples = dataset.get_examples(indices, sub_key_indices)
for sub_key_index, col_example in six.moves.zip(
sub_key_indices, sub_examples):
examples[key_offset + sub_key_index] = col_example
key_offset += len(dataset.keys)
return tuple(examples[key_index] for key_index in key_indices)
def convert(self, data):
return self._datasets[0].convert(data)
| 2,138 | 32.421875 | 79 | py |
| chainer | chainer-master/chainer/dataset/tabular/delegate_dataset.py |
from chainer.dataset.tabular import tabular_dataset
class DelegateDataset(tabular_dataset.TabularDataset):
"""A helper class to implement a TabularDataset.
This class wraps an instance of :class:`~chainer.dataset.TabularDataset`
and provides methods of :class:`~chainer.dataset.TabularDataset`.
This class is useful to create a custom dataset class by inheriting it.
>>> import numpy as np
>>>
>>> from chainer.dataset import tabular
>>>
>>> class MyDataset(tabular.DelegateDataset):
...
... def __init__(self):
... super().__init__(tabular.from_data((
... ('a', np.arange(10)),
... ('b', self.get_b),
... ('c', [3, 1, 4, 5, 9, 2, 6, 8, 7, 0]),
... (('d', 'e'), self.get_de))))
...
... def get_b(self, i):
... return 'b[{}]'.format(i)
...
... def get_de(self, i):
... return {'d': 'd[{}]'.format(i), 'e': 'e[{}]'.format(i)}
...
>>> dataset = MyDataset()
>>> len(dataset)
10
>>> dataset.keys
('a', 'b', 'c', 'd', 'e')
>>> dataset[0]
(0, 'b[0]', 3, 'd[0]', 'e[0]')
Args:
dataset (chainer.dataset.TabularDataset): An underlying dataset.
"""
def __init__(self, dataset):
self.dataset = dataset
def __len__(self):
return len(self.dataset)
@property
def keys(self):
return self.dataset.keys
@property
def mode(self):
return self.dataset.mode
def get_examples(self, indices, key_indices):
return self.dataset.get_examples(indices, key_indices)
| 1,637 | 26.3 | 76 | py |
| chainer | chainer-master/chainer/dataset/tabular/_slice.py |
import numbers
import numpy as np
import six
from chainer.dataset.tabular import tabular_dataset
class _Slice(tabular_dataset.TabularDataset):
def __init__(self, dataset, indices, keys):
if keys is None:
self._unary = None
elif isinstance(keys, tuple):
self._unary = False
else:
self._unary = True
keys = keys,
self._dataset = dataset
self._indices = _as_indices(indices, len(dataset))
self._key_indices = _as_key_indices(keys, dataset.keys)
def __len__(self):
if self._indices is None:
return len(self._dataset)
elif isinstance(self._indices, slice):
start, stop, step = self._indices.indices(len(self._dataset))
return len(six.moves.range(start, stop, step))
else:
return len(self._indices)
@property
def keys(self):
if self._key_indices is None:
return self._dataset.keys
else:
return tuple(self._dataset.keys[key_index]
for key_index in self._key_indices)
@property
def mode(self):
if self._unary is None:
return self._dataset.mode
elif self._unary:
return None
else:
return self._dataset.mode or tuple
def get_examples(self, indices, key_indices):
indices = _merge_indices(
self._indices, indices, len(self._dataset), len(self))
key_indices = _merge_key_indices(self._key_indices, key_indices)
return self._dataset.get_examples(indices, key_indices)
def convert(self, data):
return self._dataset.convert(data)
class _SliceHelper(object):
def __init__(self, dataset):
self._dataset = dataset
def __getitem__(self, args):
if isinstance(args, tuple):
indices, keys = args
else:
indices = args
keys = None
return _Slice(self._dataset, indices, keys)
def _as_indices(indices, len_):
if isinstance(indices, slice) or len(indices) == 0:
return indices
if all(isinstance(index, (bool, np.bool_)) for index in indices):
if not len(indices) == len_:
raise ValueError('The number of booleans is '
'different from the length of dataset')
return [i for i, index in enumerate(indices) if index]
else:
checked_indices = []
for index in indices:
index = int(index)
if index < 0:
index += len_
if index < 0 or len_ <= index:
raise IndexError(
'index {} is out of bounds for dataset with size {}'
.format(index, len_))
checked_indices.append(index)
return checked_indices
def _as_key_indices(keys, key_names):
if keys is None:
return keys
key_indices = []
for key in keys:
if isinstance(key, numbers.Integral):
key_index = key
if key_index < 0:
key_index += len(key_names)
if key_index < 0 or len(key_names) <= key_index:
raise IndexError(
'index {} is out of bounds for keys with size {}'.format(
key, len(key_names)))
else:
try:
key_index = key_names.index(key)
except ValueError:
                raise KeyError('{} does not exist'.format(key))
key_indices.append(key_index)
return tuple(key_indices)
def _merge_indices(a, b, len_a, len_b):
if a is None and b is None:
return None
elif a is None:
return b
elif b is None:
return a
elif isinstance(a, slice) and isinstance(b, slice):
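        # Compose slice ``b`` applied on top of slice ``a``: element ``i``
        # of the composed view maps to base index
        # ``a_start + a_step * (b_start + b_step * i)``, hence the
        # start/stop/step computed below.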
a_start, a_stop, a_step = a.indices(len_a)
b_start, b_stop, b_step = b.indices(len_b)
start = a_start + a_step * b_start
stop = a_start + a_step * b_stop
step = a_step * b_step
if start < 0 and step > 0:
start = None
if stop < 0 and step < 0:
stop = None
return slice(start, stop, step)
elif isinstance(a, slice):
a_start, _, a_step = a.indices(len_a)
return [a_start + a_step * index for index in b]
elif isinstance(b, slice):
return a[b]
else:
return [a[index] for index in b]
def _merge_key_indices(a, b):
if a is None and b is None:
return None
elif a is None:
return b
elif b is None:
return a
else:
return tuple(a[index] for index in b)
| 4,631 | 27.95 | 77 | py |
| chainer | chainer-master/chainer/dataset/tabular/_concat.py |
import six
from chainer.dataset.tabular import tabular_dataset
class _Concat(tabular_dataset.TabularDataset):
def __init__(self, *datasets):
for dataset in datasets[1:]:
if not dataset.keys == datasets[0].keys:
raise ValueError('All datasets must have the same keys')
self._datasets = datasets
def __len__(self):
return sum(len(dataset) for dataset in self._datasets)
@property
def keys(self):
return self._datasets[0].keys
@property
def mode(self):
return self._datasets[0].mode
def get_examples(self, indices, key_indices):
if key_indices is None:
n_cols = len(self.keys)
else:
n_cols = len(key_indices)
if indices is None:
examples = [
dataset.get_examples(None, key_indices)
for dataset in self._datasets]
return tuple(
[data
for sub_examples in examples
for data in sub_examples[col_index]]
for col_index in six.moves.range(n_cols))
elif isinstance(indices, slice):
start, stop, step = indices.indices(len(self))
examples = []
offset = 0
for dataset in self._datasets:
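                # Translate the global start/stop into this dataset's local
                # coordinates; ``offset`` is the total length of all
                # preceding datasets.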
sub_start = start - offset
sub_stop = stop - offset
if step > 0:
if sub_start < 0:
sub_start %= step
sub_stop = min(sub_stop, len(dataset))
else:
if sub_start >= len(dataset):
sub_start = \
len(dataset) + (sub_start - len(dataset)) % step
sub_stop = max(sub_stop, -1)
if len(six.moves.range(sub_start, sub_stop, step)) > 0:
if sub_start < 0 and step > 0:
sub_start = None
if sub_stop < 0 and step < 0:
sub_stop = None
examples.append(dataset.get_examples(
slice(sub_start, sub_stop, step), key_indices))
offset += len(dataset)
if len(examples) == 0:
return tuple([] for _ in six.moves.range(n_cols))
elif len(examples) == 1:
return examples[0]
else:
if step < 0:
examples.reverse()
return tuple(
[data
for sub_examples in examples
for data in sub_examples[col_index]]
for col_index in six.moves.range(n_cols))
else:
examples = {}
example_indices = [None] * len(indices)
offset = 0
for dataset_index, dataset in enumerate(self._datasets):
sub_indices = []
for p, index in enumerate(indices):
if index < offset or offset + len(dataset) <= index:
continue
sub_indices.append(index - offset)
example_indices[p] = (
dataset_index, len(sub_indices) - 1)
if len(sub_indices) > 0:
examples[dataset_index] = dataset.get_examples(
sub_indices, key_indices)
offset += len(dataset)
if len(examples) == 0:
return tuple([] for _ in six.moves.range(n_cols))
elif len(examples) == 1:
return list(examples.values())[0]
else:
return tuple(
[examples[dataset_index][col_index][p]
for dataset_index, p in example_indices]
for col_index in six.moves.range(n_cols))
def convert(self, data):
return self._datasets[0].convert(data)
| 3,939 | 33.561404 | 76 | py |
| chainer | chainer-master/chainer/dataset/tabular/__init__.py |
from chainer.dataset.tabular import _asmode # NOQA
from chainer.dataset.tabular import _concat # NOQA
from chainer.dataset.tabular import _join # NOQA
from chainer.dataset.tabular import _slice # NOQA
from chainer.dataset.tabular import _transform # NOQA
from chainer.dataset.tabular import _with_converter # NOQA
from chainer.dataset.tabular.delegate_dataset import DelegateDataset # NOQA
from chainer.dataset.tabular.from_data import from_data # NOQA
| 462 | 45.3 | 76 | py |
| chainer | chainer-master/chainer/dataset/tabular/tabular_dataset.py |
import six
import chainer
from chainer.dataset import dataset_mixin
class TabularDataset(dataset_mixin.DatasetMixin):
"""An abstract class that represents tabular dataset.
This class represents a tabular dataset.
In a tabular dataset, all examples have the same number of elements.
For example, all examples of the dataset below have three elements
(:obj:`a[i]`, :obj:`b[i]`, and :obj:`c[i]`).
.. csv-table::
:header: , a, b, c
0, :obj:`a[0]`, :obj:`b[0]`, :obj:`c[0]`
1, :obj:`a[1]`, :obj:`b[1]`, :obj:`c[1]`
2, :obj:`a[2]`, :obj:`b[2]`, :obj:`c[2]`
3, :obj:`a[3]`, :obj:`b[3]`, :obj:`c[3]`
Since an example can be represented by both tuple and dict (
:obj:`(a[i], b[i], c[i])` and :obj:`{'a': a[i], 'b': b[i], 'c': c[i]}`),
this class uses :attr:`mode` to indicate which representation will be used.
    If there is only one column, an example can also be represented by a value
    (:obj:`a[i]`). In this case, :attr:`mode` is :obj:`None`.
    A subclass should implement
    :meth:`__len__`, :attr:`keys`, :attr:`mode` and :meth:`get_examples`.
>>> import numpy as np
>>>
>>> from chainer import dataset
>>>
>>> class MyDataset(dataset.TabularDataset):
...
... def __len__(self):
... return 4
...
... @property
... def keys(self):
... return ('a', 'b', 'c')
...
... @property
... def mode(self):
... return tuple
...
... def get_examples(self, indices, key_indices):
... data = np.arange(12).reshape((4, 3))
... if indices is not None:
... data = data[indices]
... if key_indices is not None:
... data = data[:, list(key_indices)]
... return tuple(data.transpose())
...
>>> dataset = MyDataset()
>>> len(dataset)
4
>>> dataset.keys
('a', 'b', 'c')
>>> dataset.astuple()[0]
(0, 1, 2)
>>> sorted(dataset.asdict()[0].items())
[('a', 0), ('b', 1), ('c', 2)]
>>>
>>> view = dataset.slice[[3, 2], ('c', 0)]
>>> len(view)
2
>>> view.keys
('c', 'a')
>>> view.astuple()[1]
(8, 6)
>>> sorted(view.asdict()[1].items())
[('a', 6), ('c', 8)]
"""
def __len__(self):
raise NotImplementedError
@property
def keys(self):
"""Names of columns.
A tuple of strings that indicate the names of columns.
"""
raise NotImplementedError
@property
def mode(self):
"""Mode of representation.
This indicates the type of value returned
by :meth:`fetch` and :meth:`__getitem__`.
:class:`tuple`, :class:`dict`, and :obj:`None` are supported.
"""
raise NotImplementedError
def get_examples(self, indices, key_indices):
"""Return a part of data.
Args:
indices (list of ints or slice): Indices of requested rows.
If this argument is :obj:`None`, it indicates all rows.
key_indices (tuple of ints): Indices of requested columns.
If this argument is :obj:`None`, it indicates all columns.
Returns:
tuple of lists/arrays
"""
raise NotImplementedError
@property
def slice(self):
"""Get a slice of dataset.
Args:
indices (list/array of ints/bools or slice): Requested rows.
keys (tuple of ints/strs or int or str): Requested columns.
Returns:
A view of specified range.
"""
return chainer.dataset.tabular._slice._SliceHelper(self)
def fetch(self):
"""Fetch data.
This method fetches all data of the dataset/view.
Note that this method returns a column-major data
(i.e. :obj:`([a[0], ..., a[3]], ..., [c[0], ... c[3]])`,
:obj:`{'a': [a[0], ..., a[3]], ..., 'c': [c[0], ..., c[3]]}`, or
:obj:`[a[0], ..., a[3]]`).
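
        Continuing the ``MyDataset`` example in the class docstring
        (a sketch, not a doctest)::

            dataset = MyDataset()
            dataset.fetch()
            # -> (array([0, 3, 6, 9]), array([ 1,  4,  7, 10]),
            #     array([ 2,  5,  8, 11]))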
Returns:
If :attr:`mode` is :class:`tuple`,
this method returns a tuple of lists/arrays.
If :attr:`mode` is :class:`dict`,
this method returns a dict of lists/arrays.
"""
examples = self.get_examples(None, None)
if self.mode is tuple:
return examples
elif self.mode is dict:
return dict(six.moves.zip(self.keys, examples))
elif self.mode is None:
return examples[0]
def convert(self, data):
"""Convert fetched data.
        This method takes the data fetched by :meth:`fetch` and
        pre-processes it before passing it to models.
The default behaviour is converting each column into an ndarray.
This behaviour can be overridden by :meth:`with_converter`.
If the dataset is constructed by :meth:`concat` or :meth:`join`,
the converter of the first dataset is used.
Args:
data (tuple or dict): Data from :meth:`fetch`.
Returns:
A tuple or dict.
Each value is an ndarray.
"""
if isinstance(data, tuple):
return tuple(_as_array(d) for d in data)
elif isinstance(data, dict):
return {k: _as_array(v) for k, v in data.items()}
else:
return _as_array(data)
def astuple(self):
"""Return a view with tuple mode.
Returns:
A view whose :attr:`mode` is :class:`tuple`.
"""
return chainer.dataset.tabular._asmode._Astuple(self)
def asdict(self):
"""Return a view with dict mode.
Returns:
A view whose :attr:`mode` is :class:`dict`.
"""
return chainer.dataset.tabular._asmode._Asdict(self)
def concat(self, *datasets):
"""Stack datasets along rows.
Args:
datasets (iterable of :class:`TabularDataset`):
Datasets to be concatenated.
All datasets must have the same :attr:`keys`.
Returns:
A concatenated dataset.
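
        A minimal sketch (``dataset_a`` and ``dataset_b`` are hypothetical
        datasets with identical :attr:`keys`)::

            combined = dataset_a.concat(dataset_b)
            assert len(combined) == len(dataset_a) + len(dataset_b)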
"""
return chainer.dataset.tabular._concat._Concat(self, *datasets)
def join(self, *datasets):
"""Stack datasets along columns.
Args:
datasets (iterable of :class:`TabularDataset`):
Datasets to be concatenated.
                All datasets must have the same length.
Returns:
A joined dataset.
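
        A minimal sketch (``dataset_a`` and ``dataset_b`` are hypothetical
        datasets of equal length with disjoint keys)::

            joined = dataset_a.join(dataset_b)
            assert joined.keys == dataset_a.keys + dataset_b.keys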
"""
return chainer.dataset.tabular._join._Join(self, *datasets)
def transform(self, keys, transform):
"""Apply a transform to each example.
Args:
keys (tuple of strs): The keys of transformed examples.
transform (callable): A callable that takes an example
and returns transformed example. :attr:`mode` of
transformed dataset is determined by the transformed
examples.
Returns:
            A transformed dataset.
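
        A minimal sketch (assuming a hypothetical dataset whose examples
        are ``(a, b)`` tuples)::

            def compute_sum(a, b):
                return a + b

            summed = dataset.transform(('sum',), compute_sum)
            # summed[i] == dataset[i][0] + dataset[i][1]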
"""
return chainer.dataset.tabular._transform._Transform(
self, keys, transform)
def transform_batch(self, keys, transform_batch):
"""Apply a transform to examples.
Args:
keys (tuple of strs): The keys of transformed examples.
transform_batch (callable): A callable that takes examples
and returns transformed examples. :attr:`mode` of
transformed dataset is determined by the transformed
examples.
Returns:
            A transformed dataset.
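
        A minimal sketch (assuming the same hypothetical dataset; the
        callable receives and returns whole columns)::

            def compute_sums(a, b):
                return [ai + bi for ai, bi in zip(a, b)]

            summed = dataset.transform_batch(('sum',), compute_sums)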
"""
return chainer.dataset.tabular._transform._TransformBatch(
self, keys, transform_batch)
def with_converter(self, converter):
"""Override the behaviour of :meth:`convert`.
This method overrides :meth:`convert`.
Args:
converter (callable): A new converter.
Returns:
A dataset with the new converter.
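
        A minimal sketch (``my_converter`` is a hypothetical callable that
        receives the fetched columns of a two-key, tuple-mode dataset)::

            def my_converter(a, b):
                return {'x': a, 'y': b}

            dataset = dataset.with_converter(my_converter)
            batch = dataset.convert(dataset.fetch())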
"""
return chainer.dataset.tabular._with_converter._WithConverter(
self, converter)
def get_example(self, i):
example = self.get_examples([i], None)
example = tuple(col[0] for col in example)
if self.mode is tuple:
return example
elif self.mode is dict:
return dict(six.moves.zip(self.keys, example))
elif self.mode is None:
return example[0]
def __iter__(self):
return (self.get_example(i) for i in six.moves.range(len(self)))
def _as_array(data):
if isinstance(data, chainer.get_array_types()):
return data
else:
device = chainer.backend.get_device_from_array(data[0])
with chainer.using_device(device):
return device.xp.asarray(data)
| 8,797 | 29.978873 | 79 | py |
| chainer | chainer-master/chainer/links/__init__.py |
"""Collection of :class:`~chainer.Link` implementations."""
from chainer.links.activation.maxout import Maxout # NOQA
from chainer.links.activation.prelu import PReLU # NOQA
from chainer.links.activation.simplified_dropconnect import SimplifiedDropconnect # NOQA
from chainer.links.activation.swish import Swish # NOQA
from chainer.links.connection.bias import Bias # NOQA
from chainer.links.connection.bilinear import Bilinear # NOQA
from chainer.links.connection.convolution_2d import Convolution2D # NOQA
from chainer.links.connection.convolution_nd import Convolution1D # NOQA
from chainer.links.connection.convolution_nd import Convolution3D # NOQA
from chainer.links.connection.convolution_nd import ConvolutionND # NOQA
from chainer.links.connection.deconvolution_2d import Deconvolution2D # NOQA
from chainer.links.connection.deconvolution_nd import Deconvolution1D # NOQA
from chainer.links.connection.deconvolution_nd import Deconvolution3D # NOQA
from chainer.links.connection.deconvolution_nd import DeconvolutionND # NOQA
from chainer.links.connection.deformable_convolution_2d import DeformableConvolution2D # NOQA
from chainer.links.connection.depthwise_convolution_2d import DepthwiseConvolution2D # NOQA
from chainer.links.connection.dilated_convolution_2d import DilatedConvolution2D # NOQA
from chainer.links.connection.embed_id import EmbedID # NOQA
from chainer.links.connection.highway import Highway # NOQA
from chainer.links.connection.inception import Inception # NOQA
from chainer.links.connection.inceptionbn import InceptionBN # NOQA
from chainer.links.connection.linear import Linear # NOQA
from chainer.links.connection.local_convolution_2d import LocalConvolution2D # NOQA
from chainer.links.connection.mlp_convolution_2d import MLPConvolution2D # NOQA
from chainer.links.connection.parameter import Parameter # NOQA
from chainer.links.connection.scale import Scale # NOQA
from chainer.links.loss.black_out import BlackOut # NOQA
from chainer.links.loss.crf1d import CRF1d # NOQA
from chainer.links.loss.hierarchical_softmax import BinaryHierarchicalSoftmax # NOQA
from chainer.links.loss.negative_sampling import NegativeSampling # NOQA
from chainer.links.model.classifier import Classifier # NOQA
from chainer.links.model.vision.googlenet import GoogLeNet # NOQA
from chainer.links.model.vision.resnet import ResNet101Layers # NOQA
from chainer.links.model.vision.resnet import ResNet152Layers # NOQA
from chainer.links.model.vision.resnet import ResNet50Layers # NOQA
from chainer.links.model.vision.vgg import VGG16Layers # NOQA
from chainer.links.model.vision.vgg import VGG19Layers # NOQA
from chainer.links.normalization.batch_normalization import BatchNormalization # NOQA
from chainer.links.normalization.batch_renormalization import BatchRenormalization # NOQA
from chainer.links.normalization.decorrelated_batch_normalization import DecorrelatedBatchNormalization # NOQA
from chainer.links.normalization.group_normalization import GroupNormalization # NOQA
from chainer.links.normalization.layer_normalization import LayerNormalization # NOQA
from chainer.links.rnn.gru import GRU # NOQA
from chainer.links.rnn.gru import StatefulGRU # NOQA
from chainer.links.rnn.gru import StatelessGRU # NOQA
from chainer.links.rnn.lstm import LSTM # NOQA
from chainer.links.rnn.lstm import StatelessLSTM # NOQA
from chainer.links.rnn.mgu import StatefulMGU # NOQA
from chainer.links.rnn.mgu import StatelessMGU # NOQA
from chainer.links.rnn.n_step_gru import NStepBiGRU # NOQA
from chainer.links.rnn.n_step_gru import NStepGRU # NOQA
from chainer.links.rnn.n_step_lstm import NStepBiLSTM # NOQA
from chainer.links.rnn.n_step_lstm import NStepLSTM # NOQA
from chainer.links.rnn.n_step_rnn import NStepBiRNNReLU # NOQA
from chainer.links.rnn.n_step_rnn import NStepBiRNNTanh # NOQA
from chainer.links.rnn.n_step_rnn import NStepRNNReLU # NOQA
from chainer.links.rnn.n_step_rnn import NStepRNNTanh # NOQA
from chainer.links.rnn.peephole import StatefulPeepholeLSTM # NOQA
from chainer.links.rnn.tree_lstm import ChildSumTreeLSTM # NOQA
from chainer.links.rnn.tree_lstm import NaryTreeLSTM # NOQA
from chainer.links.rnn.zoneoutlstm import StatefulZoneoutLSTM # NOQA
from chainer.links.theano.theano_function import TheanoFunction # NOQA
| 4,325 | 65.553846 | 111 | py |
| chainer | chainer-master/chainer/links/theano/theano_function.py |
import collections
from chainer.functions.theano import theano_function
from chainer import link
from chainer.utils import collections_abc
def _to_var_tuple(vs):
import theano
msg = ('inputs and outputs must be a TensorVariable, a list '
'of TensorVariable or a tuple of TensorVariable')
if isinstance(vs, theano.tensor.TensorVariable):
return vs,
elif isinstance(vs, collections_abc.Iterable):
vs = tuple(vs)
if not all(isinstance(v, theano.tensor.TensorVariable) for v in vs):
raise TypeError(msg)
return vs
else:
raise TypeError(msg)
class TheanoFunction(link.Link):
"""Theano function wrapper.
.. warning::
This feature is experimental. The interface can change in the future.
    This link wraps a Theano function as a :class:`chainer.Link`.
    A user needs to prepare input Theano variables and output Theano variables.
    This link automatically creates Theano functions for forward calculation
    and backward calculation from the inputs and outputs. It then sends data
    in :class:`chainer.Variable` to the functions and gets results from Theano.
.. rubric:: Example
.. doctest::
# See chainer/chainer#5997
:skipif: doctest_helper.skipif_requires_satisfied( \
'Theano<=1.0.3', 'numpy>=1.16.0')
>>> import theano
>>> x = theano.tensor.fvector()
>>> y = theano.tensor.fvector()
>>> z = x + y
>>> w = x - y
>>> f = L.TheanoFunction(inputs=[x, y], outputs=[z, w])
>>> a = chainer.Variable(np.array([1, 2], dtype=np.float32))
>>> b = chainer.Variable(np.array([2, 3], dtype=np.float32))
>>> c, d = f(a, b)
>>> c.array
array([3., 5.], dtype=float32)
>>> d.array
array([-1., -1.], dtype=float32)
.. note::
The current implementation always copies :class:`cupy.ndarray` to CPU.
Args:
inputs (tuple of ``theano.tensor.TensorVariable``): Input variables of
Theano. This function accepts the same number of
:class:`~chainer.Variable`\\ s in forward computation.
outputs (tuple of ``theano.tensor.TensorVariable``):
Output variables of Theano.
The function returns the same number of
:class:`~chainer.Variable`\\ s as ``outputs``.
"""
def __init__(self, inputs, outputs):
try:
# When Theano library is imported, it executes a lot of
# initialization process. To minimize its side effect,
# we need import theano here.
import theano
except ImportError:
msg = '''theano is not installed on your environment.
Please install theano to activate theano function.
$ pip install theano'''
raise RuntimeError(msg)
super(TheanoFunction, self).__init__()
inputs = _to_var_tuple(inputs)
outputs = _to_var_tuple(outputs)
# TODO(unno): We can remove redundant gpu-cpu copy using
# theano.sandbox.cuda.basic_ops.gpu_from_host
self.forward_func = theano.function(inputs=inputs, outputs=outputs)
gs = tuple(
o.type('g_{}'.format(i)) for i, o in enumerate(outputs))
known_grads = collections.OrderedDict(zip(outputs, gs))
grad = theano.tensor.grad(
cost=None, wrt=inputs, known_grads=known_grads,
disconnected_inputs='ignore')
self.backward_func = theano.function(
inputs=inputs + gs,
outputs=grad,
on_unused_input='ignore')
def forward(self, *args):
return theano_function.theano_function(
self.forward_func, self.backward_func, *args)
| 3,740 | 32.702703 | 79 | py |
| chainer | chainer-master/chainer/links/theano/__init__.py | | 0 | 0 | 0 | py |
| chainer | chainer-master/chainer/links/normalization/group_normalization.py |
import numpy
import chainer
from chainer.functions.normalization import group_normalization
from chainer import initializers
from chainer import link
from chainer import variable
class GroupNormalization(link.Link):
"""Group normalization layer on outputs of convolution functions.
This link implements a "group normalization"
which divides the channels into groups and computes within each group
the mean and variance, then normalize by these statistics,
scales and shifts them.
Parameter initialization will be deferred until
the first forward data pass at which time the size will be determined.
Args:
groups (int):
The number of channel groups.
This value must be a divisor of the number of channels.
size (int): Size of input units. If ``None``, parameter initialization
will be deferred until the first forward data pass at which time
the size will be determined.
eps (float): Epsilon value for numerical stability of normalization.
initial_gamma (~chainer.Initializer): Initializer for
scaling parameter.
If ``None``, then the vector is filled by 1.
If a scalar, the vector is filled by it.
If ``numpy.ndarray``, the vector is set by it.
initial_beta (~chainer.Initializer): Initializer for
shifting parameter.
If ``None``, then the vector is filled by 0.
If a scalar, the vector is filled by it.
If ``numpy.ndarray``, the vector is set by it.
Attributes:
groups (int): The number of channel groups.
gamma (~chainer.Parameter): Scaling parameter.
beta (~chainer.Parameter): Shifting parameter.
~GroupNormalization.eps (float): Epsilon value for numerical stability.
See: `Group Normalization <https://arxiv.org/abs/1803.08494>`_
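
    .. admonition:: Example

        A minimal usage sketch (shapes are illustrative):

        >>> x = np.random.randn(10, 32, 8, 8).astype(np.float32)
        >>> gn = chainer.links.GroupNormalization(groups=4)
        >>> y = gn(x)
        >>> y.shape
        (10, 32, 8, 8)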
"""
def __init__(self, groups, size=None, eps=1e-5, initial_gamma=None,
initial_beta=None):
super(GroupNormalization, self).__init__()
if initial_gamma is None:
initial_gamma = 1
if initial_beta is None:
initial_beta = 0
highprec_dtype = chainer.get_dtype(
None, map_mixed16=numpy.float32)
with self.init_scope():
self.groups = groups
gamma_initializer = \
initializers._get_initializer(initial_gamma)
gamma_initializer.dtype = highprec_dtype
beta_initializer = \
initializers._get_initializer(initial_beta)
beta_initializer.dtype = highprec_dtype
self.gamma = variable.Parameter(gamma_initializer)
self.beta = variable.Parameter(beta_initializer)
self.eps = eps
if size is not None:
self._initialize_params(size)
def _initialize_params(self, size):
self.gamma.initialize(size)
self.beta.initialize(size)
def forward(self, x):
"""Apply group normalization to given input.
Args:
x (~chainer.Variable): Batch tensors.
First dimension of this value must be the size of minibatch and
second dimension must be the number of channels.
Moreover, this value must have one or more following
dimensions, such as height and width.
Returns:
~chainer.Variable: Output of the group normalization.
"""
if self.gamma.array is None:
if x.ndim < 2:
raise ValueError('Input dimension must be at least 2, '
'including batch size dimension '
'(first dimension).')
channels = x.shape[1]
self._initialize_params(channels)
return group_normalization.group_normalization(
x, self.groups, self.gamma, self.beta, self.eps)
| 3,944 | 37.676471 | 79 | py |
| chainer | chainer-master/chainer/links/normalization/layer_normalization.py |
from chainer.functions.normalization import layer_normalization
from chainer import link
from chainer import utils
from chainer import variable
class LayerNormalization(link.Link):
"""Layer normalization layer on outputs of linear functions.
.. warning::
This feature is experimental. The interface can change in the future.
    This link implements a "layer normalization" layer,
    which normalizes the input units by statistics
    computed along the second axis,
    then scales and shifts them.
Parameter initialization will be deferred until
the first forward data pass at which time the size will be determined.
Args:
size (int): Size of input units. If ``None``, parameter initialization
will be deferred until the first forward data pass at which time
the size will be determined.
eps (float): Epsilon value for numerical stability of normalization.
initial_gamma (~chainer.Initializer): Initializer for scaling vector.
If ``None``, then the vector is filled by 1.
If a scalar, the vector is filled by it.
If ``numpy.ndarray``, the vector is set by it.
initial_beta (~chainer.Initializer): Initializer for shifting vector.
If ``None``, then the vector is filled by 0.
If a scalar, the vector is filled by it.
If ``numpy.ndarray``, the vector is set by it.
Attributes:
gamma (~chainer.Parameter): Scaling parameter.
beta (~chainer.Parameter): Shifting parameter.
eps (float): Epsilon value for numerical stability.
See: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_
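
    .. admonition:: Example

        A minimal usage sketch (shapes are illustrative):

        >>> x = np.random.randn(5, 7).astype(np.float32)
        >>> ln = chainer.links.LayerNormalization()
        >>> y = ln(x)
        >>> y.shape
        (5, 7)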
"""
def __init__(self, size=None, eps=1e-6, initial_gamma=None,
initial_beta=None):
super(LayerNormalization, self).__init__()
if initial_gamma is None:
initial_gamma = 1
if initial_beta is None:
initial_beta = 0
with self.init_scope():
self.gamma = variable.Parameter(initial_gamma)
self.beta = variable.Parameter(initial_beta)
self.eps = eps
if size is not None:
self._initialize_params(size)
def _initialize_params(self, size):
self.gamma.initialize(size)
self.beta.initialize(size)
def forward(self, x):
"""Apply layer normalization to given input.
Args:
x (~chainer.Variable): Batch vectors.
Shape of this value must be `(batch_size, unit_size)`,
e.g., the output of :func:`~chainer.functions.linear`.
Returns:
~chainer.Variable: Output of the layer normalization.
"""
if self.gamma.array is None:
in_size = utils.size_of_shape(x.shape[1:])
self._initialize_params(in_size)
return layer_normalization.layer_normalization(
x, self.gamma, self.beta, self.eps)
| 2,961 | 34.686747 | 78 | py |
| chainer | chainer-master/chainer/links/normalization/batch_renormalization.py |
import chainer
from chainer import configuration
from chainer.functions.normalization import batch_normalization
from chainer.functions.normalization import batch_renormalization
from chainer.links.normalization.batch_normalization import BatchNormalization
class BatchRenormalization(BatchNormalization):
"""Batch renormalization layer on outputs of linear or convolution functions.
This link wraps the :func:`~chainer.functions.batch_renormalization` and
:func:`~chainer.functions.fixed_batch_renormalization` functions.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso::
:func:`~chainer.functions.batch_renormalization`,
:func:`~chainer.functions.fixed_batch_renormalization`
:func:`~chainer.functions.batch_normalization`,
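
    .. admonition:: Example

        A minimal usage sketch (shapes are illustrative):

        >>> x = np.random.randn(8, 3).astype(np.float32)
        >>> brn = chainer.links.BatchRenormalization(3)
        >>> y = brn(x)
        >>> y.shape
        (8, 3)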
"""
def __init__(self, size, rmax=1, dmax=0, decay=0.9, eps=2e-5,
dtype=None, use_gamma=True, use_beta=True,
initial_gamma=None, initial_beta=None,
initial_avg_mean=None, initial_avg_var=None):
super(BatchRenormalization, self).__init__(
size, decay, eps, dtype, use_gamma, use_beta,
initial_gamma, initial_beta, initial_avg_mean, initial_avg_var)
self.rmax = rmax # maximum allowed correction of variance
self.dmax = dmax # maximum allowed correction of mean
self.r = None
self.d = None
def forward(self, x, finetune=False):
if self.gamma is not None:
gamma = self.gamma
else:
with chainer.using_device(self.device):
gamma = self.xp.ones(
self.avg_mean.shape, dtype=x.dtype)
if self.beta is not None:
beta = self.beta
else:
with chainer.using_device(self.device):
beta = self.xp.zeros(
self.avg_mean.shape, dtype=x.dtype)
if configuration.config.train:
if finetune:
self.N += 1
decay = 1. - 1. / self.N
else:
decay = self.decay
avg_mean = self.avg_mean
avg_var = self.avg_var
update_statistics = True
if chainer.config.in_recomputing:
# Do not update statistics when extra forward computation is
# called.
if finetune:
self.N -= 1 # Revert the count
avg_mean = self._prev_avg_mean
avg_var = self._prev_avg_var
update_statistics = False
elif chainer.config._will_recompute:
self._prev_avg_mean = avg_mean.copy()
self._prev_avg_var = avg_var.copy()
ret = batch_renormalization.batch_renormalization(
x, gamma, beta, self.rmax, self.dmax,
self.eps, avg_mean, avg_var, decay,
update_statistics=update_statistics)
else:
# Use running average statistics or fine-tuned statistics.
mean = self.avg_mean
var = self.avg_var
ret = batch_normalization.fixed_batch_normalization(
x, gamma, beta, mean, var, self.eps)
return ret
| 3,511 | 38.022222 | 81 | py |
| chainer | chainer-master/chainer/links/normalization/decorrelated_batch_normalization.py |
import functools
import warnings
import numpy
import chainer
from chainer import configuration
from chainer import functions
from chainer import link
import chainer.serializer as serializer_mod
from chainer.utils import argument
class DecorrelatedBatchNormalization(link.Link):
"""Decorrelated batch normalization layer.
This link wraps the
:func:`~chainer.functions.decorrelated_batch_normalization` and
:func:`~chainer.functions.fixed_decorrelated_batch_normalization`
functions. It works on outputs of linear or convolution functions.
It runs in three modes: training mode, fine-tuning mode, and testing mode.
In training mode, it normalizes the input by *batch statistics*. It also
maintains approximated population statistics by moving averages, which can
be used for instant evaluation in testing mode.
In fine-tuning mode, it accumulates the input to compute *population
statistics*. In order to correctly compute the population statistics, a
    user must use this mode to feed mini-batches running through the whole
    training dataset.
In testing mode, it uses pre-computed population statistics to normalize
the input variable. The population statistics is approximated if it is
computed by training mode, or accurate if it is correctly computed by
fine-tuning mode.
Args:
size (int or tuple of ints): Size (or shape) of channel
dimensions.
groups (int): Number of groups to use for group whitening.
decay (float): Decay rate of moving average
which is used during training.
eps (float): Epsilon value for numerical stability.
dtype (numpy.dtype): Type to use in computing.
See: `Decorrelated Batch Normalization <https://arxiv.org/abs/1804.08450>`_
.. seealso::
:func:`~chainer.functions.decorrelated_batch_normalization`,
:func:`~chainer.functions.fixed_decorrelated_batch_normalization`
Attributes:
avg_mean (:ref:`ndarray`): Population mean.
avg_projection (:ref:`ndarray`): Population
projection.
groups (int): Number of groups to use for group whitening.
N (int): Count of batches given for fine-tuning.
decay (float): Decay rate of moving average
which is used during training.
~DecorrelatedBatchNormalization.eps (float): Epsilon value for
numerical stability. This value is added to the batch variances.
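
    .. admonition:: Example

        A minimal usage sketch (shapes are illustrative, assuming input
        from a linear function):

        >>> x = np.random.randn(8, 32).astype(np.float32)
        >>> dbn = chainer.links.DecorrelatedBatchNormalization(32, groups=4)
        >>> y = dbn(x)
        >>> y.shape
        (8, 32)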
"""
def __init__(self, size, groups=16, decay=0.9, eps=2e-5,
dtype=numpy.float32):
super(DecorrelatedBatchNormalization, self).__init__()
C = size // groups
self.avg_mean = numpy.zeros((groups, C), dtype=dtype)
self.register_persistent('avg_mean')
avg_projection = numpy.zeros((groups, C, C), dtype=dtype)
arange_C = numpy.arange(C)
avg_projection[:, arange_C, arange_C] = 1
self.avg_projection = avg_projection
self.register_persistent('avg_projection')
self.N = 0
self.register_persistent('N')
self.decay = decay
self.eps = eps
self.groups = groups
def serialize(self, serializer):
if isinstance(serializer, serializer_mod.Deserializer):
serializer = _PatchedDeserializer(serializer, {
'avg_mean': functools.partial(
fix_avg_mean, groups=self.groups),
'avg_projection': functools.partial(
fix_avg_projection, groups=self.groups),
})
super(DecorrelatedBatchNormalization, self).serialize(serializer)
def forward(self, x, **kwargs):
"""forward(self, x, *, finetune=False)
Invokes the forward propagation of DecorrelatedBatchNormalization.
In training mode, the DecorrelatedBatchNormalization computes moving
averages of the mean and projection for evaluation during training,
and normalizes the input using batch statistics.
Args:
x (:class:`~chainer.Variable`): Input variable.
finetune (bool): If it is in the training mode and ``finetune`` is
``True``, DecorrelatedBatchNormalization runs in fine-tuning
mode; it accumulates the input array to compute population
statistics for normalization, and normalizes the input using
batch statistics.
"""
finetune, = argument.parse_kwargs(kwargs, ('finetune', False))
if configuration.config.train:
if finetune:
self.N += 1
decay = 1. - 1. / self.N
else:
decay = self.decay
avg_mean = self.avg_mean
avg_projection = self.avg_projection
if configuration.config.in_recomputing:
# Do not update statistics when extra forward computation is
# called.
if finetune:
self.N -= 1
avg_mean = None
avg_projection = None
ret = functions.decorrelated_batch_normalization(
x, groups=self.groups, eps=self.eps,
running_mean=avg_mean, running_projection=avg_projection,
decay=decay)
else:
# Use running average statistics or fine-tuned statistics.
mean = self.avg_mean
projection = self.avg_projection
ret = functions.fixed_decorrelated_batch_normalization(
x, mean, projection, groups=self.groups)
return ret
def start_finetuning(self):
"""Resets the population count for collecting population statistics.
This method can be skipped if it is the first time to use the
fine-tuning mode. Otherwise, this method should be called before
starting the fine-tuning mode again.
"""
self.N = 0
class _PatchedDeserializer(serializer_mod.Deserializer):
def __init__(self, base, patches):
self.base = base
self.patches = patches
def __repr__(self):
return '_PatchedDeserializer({}, {})'.format(
repr(self.base), repr(self.patches))
def __call__(self, key, value):
if key not in self.patches:
return self.base(key, value)
arr = self.base(key, None)
arr = self.patches[key](arr)
if value is None:
return arr
chainer.backend.copyto(value, arr)
return value
def _warn_old_model():
msg = (
'Found moving statistics of old DecorrelatedBatchNormalization, whose '
'algorithm was different from the paper.')
warnings.warn(msg)
def fix_avg_mean(avg_mean, groups):
if avg_mean.ndim == 2: # OK
return avg_mean
elif avg_mean.ndim == 1: # Issue #7706
if groups != 1:
_warn_old_model()
return _broadcast_to(avg_mean, (groups,) + avg_mean.shape)
raise ValueError('unexpected shape of avg_mean')
def fix_avg_projection(avg_projection, groups):
if avg_projection.ndim == 3: # OK
return avg_projection
elif avg_projection.ndim == 2: # Issue #7706
if groups != 1:
_warn_old_model()
return _broadcast_to(
avg_projection, (groups,) + avg_projection.shape)
raise ValueError('unexpected shape of avg_projection')
def _broadcast_to(array, shape):
if hasattr(numpy, 'broadcast_to'):
return numpy.broadcast_to(array, shape)
else:
# numpy 1.9 doesn't support broadcast_to method
dummy = numpy.empty(shape)
bx, _ = numpy.broadcast_arrays(array, dummy)
return bx
| 7,674 | 35.20283 | 79 | py |
| chainer | chainer-master/chainer/links/normalization/__init__.py | | 0 | 0 | 0 | py |
| chainer | chainer-master/chainer/links/normalization/batch_normalization.py |
import numpy
import six
import chainer
from chainer import configuration
from chainer import functions
from chainer import initializers
from chainer import link
from chainer.graph_optimizations.static_graph_utilities import static_code
from chainer.utils import argument
from chainer import variable
class BatchNormalization(link.Link):
"""Batch normalization layer on outputs of linear or convolution functions.
This link wraps the :func:`~chainer.functions.batch_normalization` and
:func:`~chainer.functions.fixed_batch_normalization` functions.
It runs in three modes: training mode, fine-tuning mode, and testing mode.
In training mode, it normalizes the input by *batch statistics*. It also
maintains approximated population statistics by moving averages, which can
be used for instant evaluation in testing mode. Training mode is enabled
when ``chainer.config.train`` is set to ``True`` and :meth:`__call__`
    is invoked with ``finetune=False`` (the default).
In fine-tuning mode, it accumulates the input to compute *population
statistics*. In order to correctly compute the population statistics, a
    user must use this mode to feed mini-batches running through the whole
    training dataset. Fine-tuning mode is enabled when
    ``chainer.config.train`` is set to ``True`` and :meth:`__call__` is
    invoked with ``finetune=True``.
In testing mode, it uses pre-computed population statistics to normalize
the input variable. The population statistics is approximated if it is
computed by training mode, or accurate if it is correctly computed by
fine-tuning mode. Testing mode is enabled when ``chainer.config.train``
is set to ``False``.
Args:
size (int, tuple of ints, or None): Size (or shape) of channel
dimensions. If ``None``, the size will be determined from
dimension(s) of the input batch during the first forward pass.
decay (float): Decay rate of moving average. It is used on training.
eps (float): Epsilon value for numerical stability.
dtype (numpy.dtype): Type to use in computing.
        use_gamma (bool): If ``True``, use the scaling parameter. Otherwise,
            the fixed value 1 is used, which has no effect.
        use_beta (bool): If ``True``, use the shifting parameter. Otherwise,
            the fixed value 0 is used, which has no effect.
axis (int or tuple of int): Axis over which normalization is
performed. When axis is ``None``, it is determined from input
dimensions. For example, if ``x.ndim`` is 4, axis becomes (0, 2, 3)
and normalization is performed over 0th, 2nd and 3rd axis of input.
            If it is 2, axis becomes (0,) and normalization is performed
            over the 0th axis of input. When a tuple of int is given to this
            option, numbers in the tuple must be sorted in ascending
            order. For example, (0, 2) is OK, but (2, 0) is not.
initial_gamma: Initializer of the scaling parameter. The default value
is ``1``.
initial_beta: Initializer of the shifting parameter. The default value
is ``0``.
initial_avg_mean: Initializer of the moving average of population mean.
The default value is ``0``.
initial_avg_var: Initializer of the moving average of population
variance. The default value is ``1``.
.. note::
From v5.0.0, the initial value of the population variance is changed to
1. It does not change the behavior of training, but the resulting model
may have a slightly different behavior on inference. To emulate the
old behavior, pass ``initial_avg_var=0`` for training.
See: `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_
.. seealso::
:func:`~chainer.functions.batch_normalization`,
:func:`~chainer.functions.fixed_batch_normalization`
Attributes:
gamma (~chainer.Variable): Scaling parameter. In mixed16 mode, it is
initialized as float32 variable.
beta (~chainer.Variable): Shifting parameter. In mixed16 mode, it is
initialized as float32 variable.
avg_mean (:ref:`ndarray`): Population mean. In mixed16 mode, it is
initialized as float32 array.
avg_var (:ref:`ndarray`): Population variance. In mixed16 mode, it is
initialized as float32 array.
N (int): Count of batches given for fine-tuning.
decay (float): Decay rate of moving average. It is used on training.
eps (float): Epsilon value for numerical stability. This value is added
to the batch variances.
.. admonition:: Example
>>> x = np.arange(12).reshape(4, 3).astype(np.float32) ** 2
>>> x
array([[ 0., 1., 4.],
[ 9., 16., 25.],
[ 36., 49., 64.],
[ 81., 100., 121.]], dtype=float32)
>>> bn = chainer.links.BatchNormalization(3)
>>> bn(x)
variable([[-1. , -1.0664359 , -1.1117983 ],
[-0.71428573, -0.6714596 , -0.6401263 ],
[ 0.14285715, 0.19748813, 0.23583598],
[ 1.5714287 , 1.5404074 , 1.5160885 ]])
>>> (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + 2e-5)
array([[-1. , -1.0664359 , -1.1117983 ],
[-0.71428573, -0.6714596 , -0.6401263 ],
[ 0.14285715, 0.19748813, 0.235836 ],
[ 1.5714285 , 1.5404074 , 1.5160886 ]], dtype=float32)
There are several ways to make a BatchNormalization link.
    Consider a batched input of 10 images of size 32x32 with 3 channels.
>>> x = np.random.randn(10, 3, 32, 32).astype(np.float32)
1. Give the parameter size:
To normalize for each channel, give the number of channels
to ``size``.
>>> bn = chainer.links.BatchNormalization(3)
>>> bn.avg_mean.shape
(3,)
>>> bn.beta += 2.0
>>> bn.gamma *= 5.0
>>> list(sorted(bn.namedparams())) # doctest: +ELLIPSIS
[('/beta', variable([2., ...])), ('/gamma', variable([5., ...]))]
>>> y = bn(x)
>>> y.shape
(10, 3, 32, 32)
>>> np.testing.assert_allclose(
... y.array.mean(axis=(0, 2, 3)), bn.beta.array, atol=1e-6)
>>> np.testing.assert_allclose(
... y.array.std(axis=(0, 2, 3)),
... bn.gamma.array, atol=1e-3)
To normalize for each channel for each pixel, ``size`` should
be the tuple of the dimensions.
>>> bn = chainer.links.BatchNormalization((3, 32, 32))
>>> bn.avg_mean.shape
(3, 32, 32)
>>> y = bn(x)
>>> y.shape
(10, 3, 32, 32)
>>> np.testing.assert_allclose(
... y.array.mean(axis=0), bn.beta.array, atol=1e-6)
>>> np.testing.assert_allclose(
... y.array.std(axis=0),
... bn.gamma.array, atol=1e-3)
By default, channel axis is (or starts from) the 1st axis of the
input shape.
2. Give the aggregate axes:
            (from Chainer v5)
With ``axis`` option, similarly to NumPy, you may specify the
aggregate axes, which are treated as the "batch" axes for the
batch statistics.
            You can omit ``size`` if ``axis`` is given. In this case,
            creation of the persistent values ``avg_mean``, ``avg_var`` and
            the parameters ``beta``, ``gamma`` is deferred until the first
            forward propagation.
            The examples in 1. correspond to the following, respectively.
>>> bn = chainer.links.BatchNormalization(axis=(0, 2, 3))
>>> print(bn.avg_mean)
None
>>> y = bn(x)
>>> bn.avg_mean.shape
(3,)
>>> bn = chainer.links.BatchNormalization(axis=0)
>>> print(bn.avg_mean)
None
>>> y = bn(x)
>>> bn.avg_mean.shape
(3, 32, 32)
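        Finally, a minimal sketch of the fine-tuning mode described above
        (``x`` is the batched input defined earlier; ``chainer.config.train``
        is assumed to be ``True``, which is its default):
        >>> bn = chainer.links.BatchNormalization(3)
        >>> bn.start_finetuning()
        >>> y = bn(x, finetune=True)
        >>> bn.N
        1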
"""
gamma = None
beta = None
def __init__(self, size=None, decay=0.9, eps=2e-5, dtype=None,
use_gamma=True, use_beta=True,
initial_gamma=None, initial_beta=None, axis=None,
initial_avg_mean=None, initial_avg_var=None):
super(BatchNormalization, self).__init__()
if size is None and axis is None:
raise RuntimeError('size or axis is required')
self._initial_avg_mean = initial_avg_mean
self._initial_avg_var = initial_avg_var
self.N = 0
self.register_persistent('N')
self.decay = decay
self.eps = eps
if isinstance(axis, six.integer_types):
axis = (axis,)
self.axis = axis
self._highprec_dtype = chainer.get_dtype(
dtype, map_mixed16=numpy.float32)
with self.init_scope():
if use_gamma:
if initial_gamma is None:
initial_gamma = 1
gamma_initializer = \
initializers._get_initializer(initial_gamma)
gamma_initializer.dtype = self._highprec_dtype
self.gamma = variable.Parameter(gamma_initializer)
if use_beta:
if initial_beta is None:
initial_beta = 0
beta_initializer = initializers._get_initializer(initial_beta)
beta_initializer.dtype = self._highprec_dtype
self.beta = variable.Parameter(beta_initializer)
if size is None:
self.avg_mean = None
self.avg_var = None
else:
self._initialize_params(size)
self.register_persistent('avg_mean')
self.register_persistent('avg_var')
def _initialize_params(self, shape):
self.avg_mean = self._init_array(self._initial_avg_mean, 0, shape)
self._initial_avg_mean = None
self.avg_var = self._init_array(self._initial_avg_var, 1, shape)
self._initial_avg_var = None
if self.gamma is not None:
self.gamma.initialize(shape)
if self.beta is not None:
self.beta.initialize(shape)
def _init_array(self, initializer, default_value, size):
if initializer is None:
initializer = default_value
initializer = initializers._get_initializer(initializer)
return initializers.generate_array(
initializer, size, self.xp, dtype=self._highprec_dtype,
device=self.device)
@static_code
def _get_gamma(self):
with chainer.using_device(self.device):
gamma = self.xp.ones(
self.avg_mean.shape, dtype=self._highprec_dtype)
return gamma,
@static_code
def _get_beta(self):
with chainer.using_device(self.device):
beta = self.xp.zeros(
self.avg_mean.shape, dtype=self._highprec_dtype)
return beta,
@property
def printable_specs(self):
specs = [
('size', self.avg_mean.shape[0]),
('decay', self.decay),
('eps', self.eps),
('dtype', self.avg_mean.dtype),
('use_gamma', hasattr(self, 'gamma')),
('use_beta', hasattr(self, 'beta')),
]
for spec in specs:
yield spec
def forward(self, x, **kwargs):
"""forward(self, x, finetune=False)
Invokes the forward propagation of BatchNormalization.
In training mode, the BatchNormalization computes moving averages of
mean and variance for evaluation during training, and normalizes the
input using batch statistics.
Args:
x (~chainer.Variable): Input variable.
finetune (bool): If it is in the training mode and ``finetune`` is
``True``, BatchNormalization runs in fine-tuning mode; it
accumulates the input array to compute population statistics
for normalization, and normalizes the input using batch
statistics.
"""
finetune, = argument.parse_kwargs(
kwargs, ('finetune', False),
test='test argument is not supported anymore. '
'Use chainer.using_config')
if self.avg_mean is None:
param_shape = tuple([
d
for i, d in enumerate(x.shape)
if i not in self.axis])
self._initialize_params(param_shape)
        # When using static graph optimizations, beta or gamma might not be
        # initialized yet and would not be retained by the function, so the
        # static forward pass would get None instead.
gamma = self.gamma
if gamma is None:
gamma, = self._get_gamma()
beta = self.beta
if beta is None:
beta, = self._get_beta()
if configuration.config.train:
if finetune:
self.N += 1
decay = 1. - 1. / self.N
else:
decay = self.decay
avg_mean = self.avg_mean
avg_var = self.avg_var
if chainer.config.in_recomputing:
# Do not update statistics when extra forward computation is
# called.
if finetune:
self.N -= 1 # Revert the count
avg_mean = None
avg_var = None
ret = functions.batch_normalization(
x, gamma, beta, eps=self.eps, running_mean=avg_mean,
running_var=avg_var, decay=decay, axis=self.axis)
else:
# Use running average statistics or fine-tuned statistics.
mean = self.avg_mean
var = self.avg_var
ret = functions.fixed_batch_normalization(
x, gamma, beta, mean, var, self.eps, axis=self.axis)
return ret
def start_finetuning(self):
"""Resets the population count for collecting population statistics.
This method can be skipped if it is the first time to use the
fine-tuning mode. Otherwise, this method should be called before
starting the fine-tuning mode again.
"""
self.N = 0
| 14,478
| 38.887052
| 79
|
py
|
chainer
|
chainer-master/chainer/links/rnn/lstm.py
|
import six
import chainer
from chainer.functions.array import concat
from chainer.functions.array import split_axis
from chainer.functions.rnn import lstm
from chainer import initializers
from chainer import link
from chainer.links.connection import linear
from chainer import utils
from chainer import variable
class LSTMBase(link.Chain):
def __init__(self, in_size, out_size=None, lateral_init=None,
upward_init=None, bias_init=None, forget_bias_init=None):
if out_size is None:
out_size, in_size = in_size, None
super(LSTMBase, self).__init__()
if bias_init is None:
bias_init = 0
if forget_bias_init is None:
forget_bias_init = 1
self.state_size = out_size
self.lateral_init = lateral_init
self.upward_init = upward_init
self.bias_init = bias_init
self.forget_bias_init = forget_bias_init
with self.init_scope():
self.upward = linear.Linear(in_size, 4 * out_size, initialW=0)
self.lateral = linear.Linear(out_size, 4 * out_size, initialW=0,
nobias=True)
if in_size is not None:
self._initialize_params()
def _initialize_params(self):
lateral_init = initializers._get_initializer(self.lateral_init)
upward_init = initializers._get_initializer(self.upward_init)
bias_init = initializers._get_initializer(self.bias_init)
forget_bias_init = initializers._get_initializer(self.forget_bias_init)
for i in six.moves.range(0, 4 * self.state_size, self.state_size):
lateral_init(self.lateral.W.array[i:i + self.state_size, :])
upward_init(self.upward.W.array[i:i + self.state_size, :])
a, i, f, o = lstm._extract_gates(
self.upward.b.array.reshape(1, 4 * self.state_size, 1))
bias_init(a)
bias_init(i)
forget_bias_init(f)
bias_init(o)
class StatelessLSTM(LSTMBase):
"""Stateless LSTM layer.
This is a fully-connected LSTM layer as a chain. Unlike the
:func:`~chainer.functions.lstm` function, this chain holds upward and
lateral connections as child links. This link doesn't keep cell and
hidden states.
Args:
in_size (int or None): Dimension of input vectors. If ``None``,
parameter initialization will be deferred until the first forward
data pass at which time the size will be determined.
out_size (int): Dimensionality of output vectors.
Attributes:
upward (chainer.links.Linear): Linear layer of upward connections.
lateral (chainer.links.Linear): Linear layer of lateral connections.
.. admonition:: Example
There are several ways to make a StatelessLSTM link.
        Let a two-dimensional input array :math:`x`, a cell state array
        :math:`c`, and the output array of the previous step :math:`h` be:
>>> x = np.zeros((1, 10), dtype=np.float32)
>>> c = np.zeros((1, 20), dtype=np.float32)
>>> h = np.zeros((1, 20), dtype=np.float32)
1. Give both ``in_size`` and ``out_size`` arguments:
>>> l = L.StatelessLSTM(10, 20)
>>> c_new, h_new = l(c, h, x)
>>> c_new.shape
(1, 20)
>>> h_new.shape
(1, 20)
2. Omit ``in_size`` argument or fill it with ``None``:
The below two cases are the same.
>>> l = L.StatelessLSTM(20)
>>> c_new, h_new = l(c, h, x)
>>> c_new.shape
(1, 20)
>>> h_new.shape
(1, 20)
>>> l = L.StatelessLSTM(None, 20)
>>> c_new, h_new = l(c, h, x)
>>> c_new.shape
(1, 20)
>>> h_new.shape
(1, 20)
"""
def forward(self, c, h, x):
"""Returns new cell state and updated output of LSTM.
Args:
c (~chainer.Variable): Cell states of LSTM units.
h (~chainer.Variable): Output at the previous time step.
x (~chainer.Variable): A new batch from the input sequence.
Returns:
tuple of ~chainer.Variable: Returns ``(c_new, h_new)``, where
``c_new`` represents new cell state, and ``h_new`` is updated
output of LSTM units.
"""
if self.upward.W.array is None:
in_size = x.size // x.shape[0]
with chainer.using_device(self.device):
self.upward._initialize_params(in_size)
self._initialize_params()
lstm_in = self.upward(x)
if h is not None:
lstm_in += self.lateral(h)
if c is None:
xp = self.xp
with chainer.using_device(self.device):
c = variable.Variable(
xp.zeros((x.shape[0], self.state_size), dtype=x.dtype))
return lstm.lstm(c, lstm_in)
class LSTM(LSTMBase):
"""Fully-connected LSTM layer.
This is a fully-connected LSTM layer as a chain. Unlike the
:func:`~chainer.functions.lstm` function, which is defined as a stateless
activation function, this chain holds upward and lateral connections as
child links.
It also maintains *states*, including the cell state and the output
at the previous time step. Therefore, it can be used as a *stateful LSTM*.
This link supports variable length inputs. The mini-batch size of the
current input must be equal to or smaller than that of the previous one.
The mini-batch size of ``c`` and ``h`` is determined as that of the first
input ``x``.
    When the mini-batch size of the ``i``-th input is smaller than that of
    the previous input, this link only updates ``c[0:len(x)]`` and
    ``h[0:len(x)]`` and doesn't change the rest of ``c`` and ``h``
    (see example 3 below).
    So, please sort input sequences in descending order of length before
    applying the function.
Args:
in_size (int): Dimension of input vectors. If it is ``None`` or
omitted, parameter initialization will be deferred until the first
forward data pass at which time the size will be determined.
out_size (int): Dimensionality of output vectors.
lateral_init: A callable that takes :ref:`ndarray` and edits its value.
It is used for initialization of the lateral connections.
May be ``None`` to use default initialization.
upward_init: A callable that takes :ref:`ndarray` and edits its value.
It is used for initialization of the upward connections.
May be ``None`` to use default initialization.
        bias_init: A callable that takes :ref:`ndarray` and edits its value.
            It is used for initialization of the biases of the cell input,
            the input gate and the output gate of the upward connection.
            It may be a scalar; in that case, the biases are initialized
            to this value.
            If it is ``None``, these biases are initialized to zero.
        forget_bias_init: A callable that takes :ref:`ndarray` and edits its
            value. It is used for initialization of the bias of the forget
            gate of the upward connection.
            It may be a scalar; in that case, the bias is initialized
            to this value.
            If it is ``None``, the forget bias is initialized to one.
Attributes:
upward (~chainer.links.Linear): Linear layer of upward connections.
lateral (~chainer.links.Linear): Linear layer of lateral connections.
c (~chainer.Variable): Cell states of LSTM units.
h (~chainer.Variable): Output at the previous time step.
.. admonition:: Example
There are several ways to make a LSTM link.
Let a two-dimensional input array :math:`x` be:
>>> x = np.zeros((1, 10), dtype=np.float32)
1. Give both ``in_size`` and ``out_size`` arguments:
>>> l = L.LSTM(10, 20)
>>> h_new = l(x)
>>> h_new.shape
(1, 20)
2. Omit ``in_size`` argument or fill it with ``None``:
The below two cases are the same.
>>> l = L.LSTM(20)
>>> h_new = l(x)
>>> h_new.shape
(1, 20)
>>> l = L.LSTM(None, 20)
>>> h_new = l(x)
>>> h_new.shape
(1, 20)
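        3. Variable length inputs:
            A sketch of the behavior described above: the mini-batch size
            may shrink across calls, and the full state is kept internally.
            >>> l = L.LSTM(10, 20)
            >>> h2 = l(np.zeros((3, 10), dtype=np.float32))
            >>> h3 = l(np.zeros((2, 10), dtype=np.float32))
            >>> h3.shape
            (2, 20)
            >>> l.h.shape
            (3, 20)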
"""
def __init__(self, in_size, out_size=None, lateral_init=None,
upward_init=None, bias_init=None, forget_bias_init=None):
if out_size is None:
in_size, out_size = None, in_size
super(LSTM, self).__init__(
in_size, out_size, lateral_init, upward_init, bias_init,
forget_bias_init)
self.reset_state()
def device_resident_accept(self, visitor):
super(LSTM, self).device_resident_accept(visitor)
if self.c is not None:
visitor.visit_variable(self.c)
if self.h is not None:
visitor.visit_variable(self.h)
def set_state(self, c, h):
"""Sets the internal state.
It sets the :attr:`c` and :attr:`h` attributes.
Args:
c (~chainer.Variable): A new cell states of LSTM units.
h (~chainer.Variable): A new output at the previous time step.
"""
assert isinstance(c, variable.Variable)
assert isinstance(h, variable.Variable)
c.to_device(self.device)
h.to_device(self.device)
self.c = c
self.h = h
def reset_state(self):
"""Resets the internal state.
It sets ``None`` to the :attr:`c` and :attr:`h` attributes.
"""
self.c = self.h = None
def forward(self, x):
"""Updates the internal state and returns the LSTM outputs.
Args:
x (~chainer.Variable): A new batch from the input sequence.
Returns:
~chainer.Variable: Outputs of updated LSTM units.
"""
if self.upward.W.array is None:
with chainer.using_device(self.device):
in_size = utils.size_of_shape(x.shape[1:])
self.upward._initialize_params(in_size)
self._initialize_params()
batch = x.shape[0]
lstm_in = self.upward(x)
h_rest = None
if self.h is not None:
h_size = self.h.shape[0]
if batch == 0:
h_rest = self.h
elif h_size < batch:
msg = ('The batch size of x must be equal to or less than '
'the size of the previous state h.')
raise TypeError(msg)
elif h_size > batch:
h_update, h_rest = split_axis.split_axis(
self.h, [batch], axis=0)
lstm_in += self.lateral(h_update)
else:
lstm_in += self.lateral(self.h)
if self.c is None:
with chainer.using_device(self.device):
self.c = variable.Variable(
self.xp.zeros((batch, self.state_size), dtype=x.dtype))
self.c, y = lstm.lstm(self.c, lstm_in)
if h_rest is None:
self.h = y
elif len(y.array) == 0:
self.h = h_rest
else:
self.h = concat.concat([y, h_rest], axis=0)
return y
| 11,379
| 34.673981
| 79
|
py
|
chainer
|
chainer-master/chainer/links/rnn/n_step_gru.py
|
from chainer.functions.rnn import n_step_gru as rnn
from chainer.links.rnn import n_step_rnn
class NStepGRUBase(n_step_rnn.NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout, use_bi_direction)
Base link class for Stacked GRU/BiGRU links.
    This link is the base link class for :func:`chainer.links.NStepGRU` and
    :func:`chainer.links.NStepBiGRU`.
    This link's behavior depends on the argument ``use_bi_direction``.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
        use_bi_direction (bool): If ``True``, use Bi-directional GRU.
            If ``False``, use Uni-directional GRU.
.. seealso::
:func:`chainer.links.NStepGRU`
:func:`chainer.links.NStepBiGRU`
"""
n_weights = 6
class NStepGRU(NStepGRUBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Uni-directional GRU for sequences.
    This link is a stacked version of Uni-directional GRU for sequences.
    It calculates the hidden states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_gru`, this link automatically
    sorts inputs in descending order by length, and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_gru`
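    .. admonition:: Example
        A minimal shape check, following the semantics described above:
        >>> gru = L.NStepGRU(n_layers=2, in_size=3, out_size=4, dropout=0.0)
        >>> xs = [np.zeros((5, 3), dtype=np.float32),
        ...       np.zeros((2, 3), dtype=np.float32)]
        >>> hy, ys = gru(None, xs)
        >>> hy.shape
        (2, 2, 4)
        >>> [y.shape for y in ys]
        [(5, 4), (2, 4)]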
"""
use_bi_direction = False
def rnn(self, *args):
return rnn.n_step_gru(*args)
@property
def n_cells(self):
return 1
class NStepBiGRU(NStepGRUBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Bi-directional GRU for sequences.
    This link is a stacked version of Bi-directional GRU for sequences.
    It calculates the hidden states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_bigru`, this link automatically
    sorts inputs in descending order by length, and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_bigru`
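    .. admonition:: Example
        A minimal shape check; the outputs of the last layer concatenate
        both directions, so each ``ys[i]`` has ``2 * out_size`` columns:
        >>> gru = L.NStepBiGRU(n_layers=2, in_size=3, out_size=4, dropout=0.0)
        >>> xs = [np.zeros((5, 3), dtype=np.float32)]
        >>> hy, ys = gru(None, xs)
        >>> hy.shape
        (4, 1, 4)
        >>> ys[0].shape
        (5, 8)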
"""
use_bi_direction = True
def rnn(self, *args):
return rnn.n_step_bigru(*args)
@property
def n_cells(self):
return 1
| 3,012
| 28.831683
| 78
|
py
|
chainer
|
chainer-master/chainer/links/rnn/zoneoutlstm.py
|
import chainer
from chainer.functions.activation import sigmoid
from chainer.functions.activation import tanh
from chainer.functions.array import reshape
from chainer.functions.array import split_axis
from chainer.functions.noise import zoneout
from chainer import link
from chainer.links.connection import linear
from chainer.utils import argument
from chainer import variable
class StatefulZoneoutLSTM(link.Chain):
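    """Fully-connected LSTM layer with zoneout regularization.
    This is a stateful LSTM variant that applies
    :func:`~chainer.functions.zoneout` to both the cell state and the
    hidden state: at every step, each unit keeps its previous value with
    probabilities ``c_ratio`` and ``h_ratio``, respectively, instead of
    taking the newly computed one.
    Args:
        in_size (int): Dimension of input vectors.
        out_size (int): Dimension of hidden and cell vectors.
        c_ratio (float): Zoneout ratio of the cell state.
        h_ratio (float): Zoneout ratio of the hidden state.
    Attributes:
        c (~chainer.Variable): Cell states of LSTM units.
        h (~chainer.Variable): Output at the previous time step.
    """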
def __init__(self, in_size, out_size, c_ratio=0.5, h_ratio=0.5, **kwargs):
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
super(StatefulZoneoutLSTM, self).__init__()
self.state_size = out_size
self.c_ratio = c_ratio
self.h_ratio = h_ratio
self.reset_state()
with self.init_scope():
self.upward = linear.Linear(in_size, 4 * out_size)
self.lateral = linear.Linear(out_size, 4 * out_size, nobias=True)
def device_resident_accept(self, visitor):
super(StatefulZoneoutLSTM, self).device_resident_accept(visitor)
if self.c is not None:
visitor.visit_variable(self.c)
if self.h is not None:
visitor.visit_variable(self.h)
def set_state(self, c, h):
"""Sets the internal state.
It sets the :attr:`c` and :attr:`h` attributes.
Args:
c (~chainer.Variable): A new cell states of LSTM units.
h (~chainer.Variable): A new output at the previous time step.
"""
assert isinstance(c, variable.Variable)
assert isinstance(h, variable.Variable)
c.to_device(self.device)
h.to_device(self.device)
self.c = c
self.h = h
def reset_state(self):
"""Resets the internal state.
It sets ``None`` to the :attr:`c` and :attr:`h` attributes.
"""
self.c = self.h = None
def forward(self, x):
"""Updates the internal state and returns the LSTM outputs.
Args:
x (~chainer.Variable): A new batch from the input sequence.
Returns:
~chainer.Variable: Outputs of updated LSTM units.
"""
lstm_in = self.upward(x)
if self.h is not None:
lstm_in += self.lateral(self.h)
else:
xp = self.xp
with chainer.using_device(self.device):
self.h = variable.Variable(
xp.zeros((len(x), self.state_size), dtype=x.dtype))
if self.c is None:
xp = self.xp
with chainer.using_device(self.device):
self.c = variable.Variable(
xp.zeros((len(x), self.state_size), dtype=x.dtype))
lstm_in = reshape.reshape(
lstm_in, (len(lstm_in), lstm_in.shape[1] // 4, 4))
a, i, f, o = split_axis.split_axis(lstm_in, 4, 2)
a = reshape.reshape(a, (len(a), self.state_size))
i = reshape.reshape(i, (len(i), self.state_size))
f = reshape.reshape(f, (len(f), self.state_size))
o = reshape.reshape(o, (len(o), self.state_size))
c_tmp = tanh.tanh(a) * sigmoid.sigmoid(i) + sigmoid.sigmoid(f) * self.c
self.c = zoneout.zoneout(self.c, c_tmp, self.c_ratio)
self.h = zoneout.zoneout(self.h,
sigmoid.sigmoid(o) * tanh.tanh(c_tmp),
self.h_ratio)
return self.h
| 3,551
| 33.485437
| 79
|
py
|
chainer
|
chainer-master/chainer/links/rnn/gru.py
|
from chainer.functions.activation import sigmoid
from chainer.functions.activation import tanh
from chainer.functions.math import linear_interpolate
from chainer import link
from chainer.links.connection import linear
from chainer import variable
class GRUBase(link.Chain):
def __init__(self, in_size, out_size, init=None,
inner_init=None, bias_init=None):
super(GRUBase, self).__init__()
with self.init_scope():
self.W_r = linear.Linear(
in_size, out_size, initialW=init, initial_bias=bias_init)
self.U_r = linear.Linear(
out_size, out_size, initialW=inner_init,
initial_bias=bias_init)
self.W_z = linear.Linear(
in_size, out_size, initialW=init, initial_bias=bias_init)
self.U_z = linear.Linear(
out_size, out_size, initialW=inner_init,
initial_bias=bias_init)
self.W = linear.Linear(
in_size, out_size, initialW=init, initial_bias=bias_init)
self.U = linear.Linear(
out_size, out_size, initialW=inner_init,
initial_bias=bias_init)
class StatelessGRU(GRUBase):
"""Stateless Gated Recurrent Unit function (GRU).
GRU function has six parameters :math:`W_r`, :math:`W_z`, :math:`W`,
:math:`U_r`, :math:`U_z`, and :math:`U`.
The three parameters :math:`W_r`, :math:`W_z`, and :math:`W` are
:math:`n \\times m` matrices, and the others :math:`U_r`, :math:`U_z`,
and :math:`U` are :math:`n \\times n` matrices, where :math:`m` is the
length of input vectors and :math:`n` is the length of hidden vectors.
Given two inputs a previous hidden vector :math:`h` and an input vector
:math:`x`, GRU returns the next hidden vector :math:`h'` defined as
.. math::
r &=& \\sigma(W_r x + U_r h), \\\\
z &=& \\sigma(W_z x + U_z h), \\\\
\\bar{h} &=& \\tanh(W x + U (r \\odot h)), \\\\
h' &=& (1 - z) \\odot h + z \\odot \\bar{h},
where :math:`\\sigma` is the sigmoid function, and :math:`\\odot` is the
element-wise product.
As the name indicates, :class:`~chainer.links.StatelessGRU` is *stateless*,
meaning that it does not hold the value of
hidden vector :math:`h`.
For a *stateful* GRU, use :class:`~chainer.links.StatefulGRU`.
Args:
in_size(int): Dimension of input vector :math:`x`.
If ``None``, parameter initialization will be deferred
until the first forward data pass
at which time the size will be determined.
out_size(int): Dimension of hidden vector :math:`h`,
:math:`\\bar{h}` and :math:`h'`.
See:
- `On the Properties of Neural Machine Translation: Encoder-Decoder
Approaches <https://www.aclweb.org/anthology/W14-4012>`_
[Cho+, SSST2014].
- `Empirical Evaluation of Gated Recurrent Neural Networks on Sequence
Modeling <https://arxiv.org/abs/1412.3555>`_
[Chung+NIPS2014 DLWorkshop].
.. seealso:: :class:`~chainer.links.StatefulGRU`
.. admonition:: Example
There are several ways to make a ``StatelessGRU`` link.
Let ``x`` be a two-dimensional input array:
>>> in_size = 10
>>> out_size = 20
>>> x = np.zeros((1, in_size), dtype=np.float32)
>>> h = np.zeros((1, out_size), dtype=np.float32)
1. Give both ``in_size`` and ``out_size`` arguments:
>>> l = L.StatelessGRU(in_size, out_size)
>>> h_new = l(h, x)
>>> h_new.shape
(1, 20)
2. Omit ``in_size`` argument or fill it with ``None``:
>>> l = L.StatelessGRU(None, out_size)
>>> h_new = l(h, x)
>>> h_new.shape
(1, 20)
"""
def forward(self, h, x):
r = sigmoid.sigmoid(self.W_r(x) + self.U_r(h))
z = sigmoid.sigmoid(self.W_z(x) + self.U_z(h))
h_bar = tanh.tanh(self.W(x) + self.U(r * h))
h_new = linear_interpolate.linear_interpolate(z, h_bar, h)
return h_new
class StatefulGRU(GRUBase):
"""Stateful Gated Recurrent Unit function (GRU).
Stateful GRU function has six parameters :math:`W_r`, :math:`W_z`,
:math:`W`, :math:`U_r`, :math:`U_z`, and :math:`U`.
The three parameters :math:`W_r`, :math:`W_z`, and :math:`W` are
:math:`n \\times m` matrices, and the others :math:`U_r`, :math:`U_z`,
and :math:`U` are :math:`n \\times n` matrices, where :math:`m` is the
length of input vectors and :math:`n` is the length of hidden vectors.
Given input vector :math:`x`, Stateful GRU returns the next
hidden vector :math:`h'` defined as
.. math::
r &=& \\sigma(W_r x + U_r h), \\\\
z &=& \\sigma(W_z x + U_z h), \\\\
\\bar{h} &=& \\tanh(W x + U (r \\odot h)), \\\\
h' &=& (1 - z) \\odot h + z \\odot \\bar{h},
    where :math:`h` is the current hidden vector.
    As the name indicates, :class:`~chainer.links.StatefulGRU` is *stateful*,
    meaning that it also holds the next hidden vector :math:`h'` as a state.
    For a *stateless* GRU, use :class:`~chainer.links.StatelessGRU`.
Args:
in_size(int): Dimension of input vector :math:`x`.
out_size(int): Dimension of hidden vector :math:`h`.
init: Initializer for GRU's input units (:math:`W`).
It is a callable that takes :ref:`ndarray` and edits its value.
If it is ``None``, the default initializer is used.
inner_init: Initializer for the GRU's inner
recurrent units (:math:`U`).
It is a callable that takes :ref:`ndarray` and edits its value.
If it is ``None``, the default initializer is used.
bias_init: Bias initializer.
It is a callable that takes :ref:`ndarray` and edits its value.
If ``None``, the bias is set to zero.
Attributes:
h(~chainer.Variable): Hidden vector that indicates the state of
:class:`~chainer.links.StatefulGRU`.
.. seealso::
* :class:`~chainer.links.StatelessGRU`
* :class:`~chainer.links.GRU`: an alias of
:class:`~chainer.links.StatefulGRU`
.. admonition:: Example
There are several ways to make a ``StatefulGRU`` link.
Let ``x`` be a two-dimensional input array:
>>> in_size = 10
>>> out_size = 20
>>> x = np.zeros((1, in_size), dtype=np.float32)
1. Give only ``in_size`` and ``out_size`` arguments:
>>> l = L.StatefulGRU(in_size, out_size)
>>> h_new = l(x)
>>> h_new.shape
(1, 20)
2. Give all optional arguments:
>>> init = np.zeros((out_size, in_size), dtype=np.float32)
>>> inner_init = np.zeros((out_size, out_size), dtype=np.float32)
>>> bias = np.zeros((1, out_size), dtype=np.float32)
>>> l = L.StatefulGRU(in_size, out_size, init=init,
... inner_init=inner_init, bias_init=bias)
>>> h_new = l(x)
>>> h_new.shape
(1, 20)
"""
def __init__(self, in_size, out_size, init=None,
inner_init=None, bias_init=0):
super(StatefulGRU, self).__init__(
in_size, out_size, init, inner_init, bias_init)
self.state_size = out_size
self.reset_state()
def device_resident_accept(self, visitor):
super(StatefulGRU, self).device_resident_accept(visitor)
if self.h is not None:
visitor.visit_variable(self.h)
def set_state(self, h):
assert isinstance(h, variable.Variable)
h.to_device(self.device)
self.h = h
def reset_state(self):
self.h = None
def forward(self, x):
z = self.W_z(x)
h_bar = self.W(x)
if self.h is not None:
r = sigmoid.sigmoid(self.W_r(x) + self.U_r(self.h))
z += self.U_z(self.h)
h_bar += self.U(r * self.h)
z = sigmoid.sigmoid(z)
h_bar = tanh.tanh(h_bar)
if self.h is not None:
h_new = linear_interpolate.linear_interpolate(z, h_bar, self.h)
else:
h_new = z * h_bar
self.h = h_new
return self.h
class GRU(StatefulGRU):
"""Stateful Gated Recurrent Unit function (GRU)
This is an alias of :class:`~chainer.links.StatefulGRU`.
"""
def forward(self, *args):
"""forward(self, x)
Does forward propagation.
"""
n_args = len(args)
        msg = ('Invalid argument. GRU.forward must take exactly one '
               'argument, but %d were given. ' % n_args)
if n_args == 0 or n_args >= 3:
raise ValueError(msg)
elif n_args == 2:
msg += ('In Chainer v2, chainer.links.GRU is changed '
'from stateless to stateful. '
'One possibility is you assume GRU to be stateless. '
'Use chainer.links.StatelessGRU instead.')
raise ValueError(msg)
return super(GRU, self).forward(args[0])
| 9,127
| 34.379845
| 79
|
py
|
chainer
|
chainer-master/chainer/links/rnn/mgu.py
|
import numpy
import chainer
from chainer.functions.activation import sigmoid
from chainer.functions.activation import tanh
from chainer.functions.array import concat
from chainer.functions.math import linear_interpolate
from chainer import link
from chainer.links.connection import linear
class MGUBase(link.Chain):
def __init__(self, n_inputs, n_units):
super(MGUBase, self).__init__()
with self.init_scope():
self.W_f = linear.Linear(n_inputs + n_units, n_units)
self.W_h = linear.Linear(n_inputs + n_units, n_units)
def _call_mgu(self, h, x):
f = sigmoid.sigmoid(self.W_f(concat.concat([h, x])))
h_bar = tanh.tanh(self.W_h(concat.concat([f * h, x])))
h_new = linear_interpolate.linear_interpolate(f, h_bar, h)
return h_new
class StatelessMGU(MGUBase):
forward = MGUBase._call_mgu
class StatefulMGU(MGUBase):
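    """Stateful Minimal Gated Unit (MGU) layer.
    A fully-connected MGU recurrent layer that keeps the hidden state
    ``h`` internally. One step computes a forget gate
    ``f = sigmoid(W_f([h, x]))``, a candidate
    ``h_bar = tanh(W_h([f * h, x]))``, and the new state as the linear
    interpolation ``f * h_bar + (1 - f) * h``.
    Args:
        in_size (int): Dimension of input vectors.
        out_size (int): Dimension of the hidden vector ``h``.
    """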
def __init__(self, in_size, out_size):
super(StatefulMGU, self).__init__(in_size, out_size)
self._state_size = out_size
self.reset_state()
def device_resident_accept(self, visitor):
super(StatefulMGU, self).device_resident_accept(visitor)
if self.h is not None:
visitor.visit_variable(self.h)
def set_state(self, h):
assert isinstance(h, chainer.Variable)
h_ = h
if self.xp is numpy:
h_.to_cpu()
else:
h_.to_gpu()
self.h = h_
def reset_state(self):
self.h = None
def forward(self, x):
if self.h is None:
n_batch = x.shape[0]
dtype = chainer.get_dtype()
h_data = self.xp.zeros(
(n_batch, self._state_size), dtype=dtype)
h = chainer.Variable(h_data)
else:
h = self.h
self.h = self._call_mgu(h, x)
return self.h
| 1,872
| 26.544118
| 66
|
py
|
chainer
|
chainer-master/chainer/links/rnn/tree_lstm.py
|
import numpy
from chainer.functions.activation import sigmoid
from chainer.functions.activation import tanh
from chainer.functions.array import concat
from chainer.functions.array import split_axis
from chainer.functions.rnn import tree_lstm
from chainer import link
from chainer.links.connection import linear
class ChildSumTreeLSTM(link.Chain):
"""Child-Sum TreeLSTM unit.
.. warning::
This feature is experimental. The interface can change in the future.
This is a Child-Sum TreeLSTM unit as a chain.
    This link takes a variable number of arguments and compounds
    the states of all children nodes into the new states of
    the current (parent) node. *states* denotes the cell state, :math:`c`,
    and the output, :math:`h`, which are produced by this link.
    This link doesn't keep cell and hidden states internally.
    For example, this link is called as
    ``func(c1, c2, h1, h2, x)`` if the number of children nodes is 2,
    and as ``func(c1, c2, c3, h1, h2, h3, x)`` if it is 3.
    This function is *independent* of the order of children nodes.
    Thus, the returns of ``func(c1, c2, h1, h2, x)`` are equal to
    those of ``func(c2, c1, h2, h1, x)``.
Args:
in_size (int): Dimension of input vectors.
out_size (int): Dimensionality of cell and output vectors.
Attributes:
W_x (chainer.links.Linear): Linear layer of
connections from input vectors.
        W_h_aio (chainer.links.Linear): Linear layer of connections between
            (:math:`a`, :math:`i`, :math:`o`) and the summation of
            children's output vectors. :math:`a`, :math:`i` and :math:`o`
            denote the input compound,
            the input gate and the output gate, respectively.
            :math:`a`, the input compound, corresponds to :math:`u` in
            the paper by Tai et al.
W_h_f (chainer.links.Linear): Linear layer of connections between
forget gate :math:`f` and the output of each child.
See the paper for details: `Improved Semantic Representations From
Tree-Structured Long Short-Term Memory Networks
<https://www.aclweb.org/anthology/P15-1150>`_.
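    .. admonition:: Example
        A minimal shape check for two children, following the calling
        convention described above:
        >>> link = L.ChildSumTreeLSTM(10, 20)
        >>> c1, c2, h1, h2 = [np.zeros((1, 20), dtype=np.float32)
        ...                   for _ in range(4)]
        >>> x = np.zeros((1, 10), dtype=np.float32)
        >>> c_new, h_new = link(c1, c2, h1, h2, x)
        >>> c_new.shape
        (1, 20)
        >>> h_new.shape
        (1, 20)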
"""
def __init__(self, in_size, out_size):
super(ChildSumTreeLSTM, self).__init__()
with self.init_scope():
self.W_x = linear.Linear(in_size, 4 * out_size)
self.W_h_aio = linear.Linear(out_size, 3 * out_size, nobias=True)
self.W_h_f = linear.Linear(out_size, out_size, nobias=True)
self.in_size = in_size
self.state_size = out_size
def forward(self, *cshsx):
"""Returns new cell state and output of Child-Sum TreeLSTM.
Args:
cshsx (list of :class:`~chainer.Variable`): Variable arguments
which include all cell vectors and all output vectors of
variable children, and an input vector.
Returns:
tuple of ~chainer.Variable: Returns
:math:`(c_{new}, h_{new})`, where :math:`c_{new}` represents
new cell state vector, and :math:`h_{new}` is new output
vector.
"""
cs = cshsx[:len(cshsx) // 2]
hs = cshsx[len(cshsx) // 2:-1]
x = cshsx[-1]
assert(len(cshsx) % 2 == 1)
assert(len(cs) == len(hs))
if x is None:
if any(c is not None for c in cs):
base = [c for c in cs if c is not None][0]
elif any(h is not None for h in hs):
base = [h for h in hs if h is not None][0]
else:
raise ValueError('All inputs (cs, hs, x) are None.')
batchsize, dtype = base.shape[0], base.dtype
x = self.xp.zeros(
(batchsize, self.in_size), dtype=dtype)
W_x_in = self.W_x(x)
W_x_aio_in, W_x_f_in = split_axis.split_axis(
W_x_in, [3 * self.state_size], axis=1)
if len(hs) == 0:
aio_in = W_x_aio_in
a, i, o = split_axis.split_axis(aio_in, 3, axis=1)
c = sigmoid.sigmoid(i) * tanh.tanh(a)
h = sigmoid.sigmoid(o) * tanh.tanh(c)
return c, h
hs = self._pad_zero_nodes(
hs, (x.shape[0], self.state_size), dtype=x.dtype)
cs = self._pad_zero_nodes(
cs, (x.shape[0], self.state_size), dtype=x.dtype)
aio_in = self.W_h_aio(sum(hs)) + W_x_aio_in
W_h_fs_in = concat.concat(split_axis.split_axis(
self.W_h_f(concat.concat(hs, axis=0)), len(hs), axis=0),
axis=1)
f_in = W_h_fs_in + \
concat.concat([W_x_f_in] * len(hs), axis=1)
tree_lstm_in = concat.concat([aio_in, f_in], axis=1)
return tree_lstm.tree_lstm(*(cs + (tree_lstm_in, )))
def _pad_zero_nodes(self, vs, shape, dtype=numpy.float32):
if any(v is None for v in vs):
zero = self.xp.zeros(shape, dtype=dtype)
return tuple(zero if v is None else v for v in vs)
else:
return vs
class NaryTreeLSTM(link.Chain):
"""N-ary TreeLSTM unit.
.. warning::
This feature is experimental. The interface can change in the future.
    This is an N-ary TreeLSTM unit as a chain.
    This link takes a fixed number of arguments and compounds
    the states of all children nodes into the new states of
    the current (parent) node. *states* denotes the cell state, :math:`c`,
    and the output, :math:`h`, which are produced by this link.
    This link doesn't keep cell and hidden states internally.
    For example, this link is called as
    ``func(c1, c2, h1, h2, x)`` if the number of children nodes
    was set to 2 (``n_ary = 2``), and as
    ``func(c1, c2, c3, h1, h2, h3, x)`` if it was set to 3
    (``n_ary = 3``).
    Unlike Child-Sum TreeLSTM, this function is *dependent* on the
    order of children nodes.
    Thus, the returns of ``func(c1, c2, h1, h2, x)`` are
    different from those of ``func(c2, c1, h2, h1, x)``.
Args:
in_size (int): Dimension of input vectors.
out_size (int): Dimensionality of cell and output vectors.
n_ary (int): The number of children nodes in a tree structure.
Attributes:
W_x (chainer.links.Linear): Linear layer of
connections from input vectors.
        W_h (chainer.links.Linear): Linear layer of connections between
            (:math:`a`, :math:`i`, :math:`o`, all :math:`f`)
            and the output of each child.
            :math:`a`, :math:`i`, :math:`o` and :math:`f` denote the input
            compound, the input gate, the output gate and the forget gate,
            respectively.
            :math:`a`, the input compound, corresponds to :math:`u` in
            the paper by Tai et al.
See the papers for details: `Improved Semantic Representations From
Tree-Structured Long Short-Term Memory Networks
<https://www.aclweb.org/anthology/P15-1150>`_, and
`A Fast Unified Model for Parsing and Sentence Understanding
<https://arxiv.org/pdf/1603.06021.pdf>`_.
    Tai et al.'s N-ary TreeLSTM is slightly extended in
    Bowman et al., and this link is based on
    the variant by Bowman et al.
Specifically, eq. 10 in Tai et al. has only one :math:`W` matrix
to be applied to :math:`x`, consistently for all children.
On the other hand, Bowman et al.'s model has multiple matrices,
each of which affects the forget gate for each child's cell individually.
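    .. admonition:: Example
        With the default ``n_ary = 2``, the call takes exactly
        ``n_ary * 2 + 1`` arguments:
        >>> link = L.NaryTreeLSTM(10, 20)
        >>> c1, c2, h1, h2 = [np.zeros((1, 20), dtype=np.float32)
        ...                   for _ in range(4)]
        >>> x = np.zeros((1, 10), dtype=np.float32)
        >>> c_new, h_new = link(c1, c2, h1, h2, x)
        >>> c_new.shape
        (1, 20)
        >>> h_new.shape
        (1, 20)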
"""
def __init__(self, in_size, out_size, n_ary=2):
assert(n_ary >= 1)
super(NaryTreeLSTM, self).__init__()
with self.init_scope():
self.W_x = linear.Linear(in_size, (3 + n_ary) * out_size)
for i in range(1, n_ary + 1):
l = linear.Linear(
out_size, (3 + n_ary) * out_size, nobias=True)
setattr(self, 'W_h{}'.format(i), l)
self.in_size = in_size
self.state_size = out_size
self.n_ary = n_ary
def forward(self, *cshsx):
"""Returns new cell state and output of N-ary TreeLSTM.
Args:
cshsx (list of :class:`~chainer.Variable`): Arguments which include
all cell vectors and all output vectors of fixed-length
children, and an input vector. The number of arguments must be
same as ``n_ary * 2 + 1``.
Returns:
tuple of ~chainer.Variable: Returns :math:`(c_{new}, h_{new})`,
where :math:`c_{new}` represents new cell state vector,
and :math:`h_{new}` is new output vector.
"""
assert(len(cshsx) == self.n_ary * 2 + 1)
cs = cshsx[:self.n_ary]
hs = cshsx[self.n_ary:-1]
x = cshsx[-1]
if x is None:
if any(c is not None for c in cs):
base = [c for c in cs if c is not None][0]
elif any(h is not None for h in hs):
base = [h for h in hs if h is not None][0]
else:
raise ValueError('All inputs (cs, hs, x) are None.')
batchsize, dtype = base.shape[0], base.dtype
x = self.xp.zeros(
(batchsize, self.in_size), dtype=dtype)
tree_lstm_in = self.W_x(x)
for i, h in enumerate(hs, start=1):
if h is not None:
tree_lstm_in += getattr(self, 'W_h{}'.format(i))(h)
cs = self._pad_zero_nodes(
cs, (x.shape[0], self.state_size), dtype=x.dtype)
return tree_lstm.tree_lstm(*(cs + (tree_lstm_in, )))
def _pad_zero_nodes(self, vs, shape, dtype=numpy.float32):
if any(v is None for v in vs):
zero = self.xp.zeros(shape, dtype=dtype)
return tuple(zero if v is None else v for v in vs)
else:
return vs
| 9,745
| 37.674603
| 79
|
py
|
chainer
|
chainer-master/chainer/links/rnn/n_step_lstm.py
|
from chainer.functions.rnn import n_step_lstm as rnn
from chainer.links.rnn import n_step_rnn
class NStepLSTMBase(n_step_rnn.NStepRNNBase):
"""Base link class for Stacked LSTM/BiLSTM links.
    This link is the base link class for :func:`chainer.links.NStepLSTM` and
    :func:`chainer.links.NStepBiLSTM`.
    This link's behavior depends on the argument ``use_bi_direction``.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
        use_bi_direction (bool): If ``True``, use Bi-directional LSTM.
            If ``False``, use Uni-directional LSTM.
.. seealso::
:func:`chainer.functions.n_step_lstm`
:func:`chainer.functions.n_step_bilstm`
"""
n_weights = 8
def forward(self, hx, cx, xs, **kwargs):
"""forward(self, hx, cx, xs)
Calculates all of the hidden states and the cell states.
Args:
hx (:class:`~chainer.Variable` or None):
Initial hidden states. If ``None`` is specified zero-vector
is used. Its shape is ``(S, B, N)`` for uni-directional LSTM
and ``(2S, B, N)`` for bi-directional LSTM where ``S`` is
the number of layers and is equal to ``n_layers``,
``B`` is the mini-batch size,
and ``N`` is the dimension of the hidden units.
cx (:class:`~chainer.Variable` or None):
Initial cell states. If ``None`` is specified zero-vector is
used. It has the same shape as ``hx``.
xs (list of :class:`~chainer.Variable`): List of input sequences.
Each element ``xs[i]`` is a :class:`chainer.Variable` holding
a sequence. Its shape is ``(L_i, I)``, where ``L_i`` is the
length of a sequence for batch ``i``, and ``I`` is the size of
the input and is equal to ``in_size``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy``, ``cy`` and ``ys``.
            - ``hy`` is the updated hidden states whose shape is the same as
              ``hx``.
            - ``cy`` is the updated cell states whose shape is the same as
              ``cx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[i]`` holds hidden states of the last layer corresponding
to an input ``xs[i]``. Its shape is ``(L_i, N)`` for
uni-directional LSTM and ``(L_i, 2N)`` for bi-directional LSTM
where ``L_i`` is the length of a sequence for batch ``i``,
and ``N`` is size of hidden units.
"""
(hy, cy), ys = self._call([hx, cx], xs, **kwargs)
return hy, cy, ys
class NStepLSTM(NStepLSTMBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Uni-directional LSTM for sequences.
    This link is a stacked version of Uni-directional LSTM for sequences.
    It calculates the hidden and cell states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_lstm`, this link automatically
    sorts inputs in descending order by length, and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_lstm`
.. admonition:: Example
*Read* :meth:`forward` *method below first.*
>>> dropout_ratio = 0.0
>>> in_size, seq_len, n_layers, out_size = 2, 4, 2, 3
>>> batch = 5
>>> xs = [
... Variable(np.random.rand(seq_len, in_size).astype(np.float32))
... for i in range(batch)]
>>> [x.shape for x in xs]
[(4, 2), (4, 2), (4, 2), (4, 2), (4, 2)]
>>> lstm = L.NStepLSTM(n_layers, in_size, out_size, dropout_ratio)
Without hidden or cell state:
>>> hy, cy, ys = lstm(None, None, xs)
>>> hy.shape # shape should be (n_layers, batch, out_size)
(2, 5, 3)
>>> ys[0].shape # should be (seq_len, out_size)
(4, 3)
>>> len(ys) # should be equal to batch
5
With hidden and cell states:
>>> h_shape = (n_layers, batch, out_size)
>>> hx = Variable(np.ones(h_shape, np.float32))
>>> cx = Variable(np.ones(h_shape, np.float32))
>>> hy, cy, ys = lstm(hx, cx, xs)
>>> hy.shape # shape should be (n_layers, batch, out_size)
(2, 5, 3)
>>> ys[0].shape # should be (seq_len, out_size)
(4, 3)
"""
use_bi_direction = False
def rnn(self, *args):
return rnn.n_step_lstm(*args)
@property
def n_cells(self):
return 2
class NStepBiLSTM(NStepLSTMBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Bi-directional LSTM for sequences.
    This link is a stacked version of Bi-directional LSTM for sequences.
    It calculates the hidden and cell states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_bilstm`, this link automatically
    sorts inputs in descending order by length, and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_bilstm`
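    .. admonition:: Example
        A minimal shape check; shapes follow
        :class:`~chainer.links.NStepLSTM` except that the two directions
        are concatenated:
        >>> lstm = L.NStepBiLSTM(
        ...     n_layers=2, in_size=3, out_size=4, dropout=0.0)
        >>> xs = [np.zeros((5, 3), dtype=np.float32)]
        >>> hy, cy, ys = lstm(None, None, xs)
        >>> hy.shape
        (4, 1, 4)
        >>> ys[0].shape
        (5, 8)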
"""
use_bi_direction = True
def rnn(self, *args):
return rnn.n_step_bilstm(*args)
@property
def n_cells(self):
return 2
| 6,131
| 34.651163
| 79
|
py
|
chainer
|
chainer-master/chainer/links/rnn/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/links/rnn/n_step_rnn.py
|
import numpy
import six
import chainer
from chainer.functions.array import permutate
from chainer.functions.array import transpose_sequence
from chainer.functions.rnn import n_step_rnn as rnn
from chainer import initializers
from chainer import link
from chainer.utils import argument
from chainer import variable
def argsort_list_descent(lst):
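    # Indices that sort ``lst`` by sequence length, longest first.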
return numpy.argsort([-len(x) for x in lst]).astype(numpy.int32)
def permutate_list(lst, indices, inv):
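    # Reorders ``lst`` by ``indices``; ``inv=True`` applies the inverse
    # permutation instead.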
ret = [None] * len(lst)
if inv:
for i, ind in enumerate(indices):
ret[ind] = lst[i]
else:
for i, ind in enumerate(indices):
ret[i] = lst[ind]
return ret
class NStepRNNBase(link.ChainList):
"""__init__(self, n_layers, in_size, out_size, dropout, \
*, initialW=None, initial_bias=None)
Base link class for Stacked RNN/BiRNN links.
    This link is the base link class for :func:`chainer.links.NStepRNN` and
    :func:`chainer.links.NStepBiRNN`.
    This link's behavior depends on the argument ``use_bi_direction``.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
initialW (:ref:`initializer <initializer>`): Initializer to initialize
the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 2. If ``initialW`` is ``None``, then the
weights are initialized with i.i.d. Gaussian samples, each of which
has zero mean and deviation :math:`\\sqrt{1/\\text{in_size}}`.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
.. seealso::
:func:`chainer.links.NStepRNNReLU`
:func:`chainer.links.NStepRNNTanh`
:func:`chainer.links.NStepBiRNNReLU`
:func:`chainer.links.NStepBiRNNTanh`
"""
def __init__(self, n_layers, in_size, out_size, dropout,
*, initialW=None, initial_bias=None, **kwargs):
if kwargs:
argument.check_unexpected_kwargs(
kwargs,
use_cudnn='use_cudnn argument is not supported anymore. '
'Use chainer.using_config',
use_bi_direction='use_bi_direction is not supported anymore',
activation='activation is not supported anymore')
argument.assert_kwargs_empty(kwargs)
weights = []
if self.use_bi_direction:
direction = 2
else:
direction = 1
W_initializer = initializers._get_initializer(initialW)
if initial_bias is None:
initial_bias = 0
bias_initializer = initializers._get_initializer(initial_bias)
for i in six.moves.range(n_layers):
for di in six.moves.range(direction):
weight = link.Link()
with weight.init_scope():
for j in six.moves.range(self.n_weights):
if i == 0 and j < self.n_weights // 2:
w_in = in_size
elif i > 0 and j < self.n_weights // 2:
w_in = out_size * direction
else:
w_in = out_size
w = variable.Parameter(W_initializer, (out_size, w_in))
b = variable.Parameter(bias_initializer, out_size)
setattr(weight, 'w%d' % j, w)
setattr(weight, 'b%d' % j, b)
weights.append(weight)
super(NStepRNNBase, self).__init__(*weights)
self.ws = [[getattr(layer, 'w%d' % i)
for i in six.moves.range(self.n_weights)]
for layer in self]
self.bs = [[getattr(layer, 'b%d' % i)
for i in six.moves.range(self.n_weights)]
for layer in self]
self.n_layers = n_layers
self.dropout = dropout
self.out_size = out_size
self.direction = direction
def copy(self, mode='share'):
ret = super(NStepRNNBase, self).copy(mode)
ret.ws = [[getattr(layer, 'w%d' % i)
for i in six.moves.range(ret.n_weights)] for layer in ret]
ret.bs = [[getattr(layer, 'b%d' % i)
for i in six.moves.range(ret.n_weights)] for layer in ret]
return ret
def init_hx(self, xs):
shape = (self.n_layers * self.direction, len(xs), self.out_size)
with chainer.using_device(self.device):
hx = variable.Variable(self.xp.zeros(shape, dtype=xs[0].dtype))
return hx
def rnn(self, *args):
"""Calls RNN function.
This function must be implemented in a child class.
"""
raise NotImplementedError
@property
def n_cells(self):
"""Returns the number of cells.
This function must be implemented in a child class.
"""
        raise NotImplementedError
def forward(self, hx, xs, **kwargs):
"""forward(self, hx, xs)
Calculates all of the hidden states and the cell states.
Args:
hx (:class:`~chainer.Variable` or None): Initial hidden states.
If ``None`` is specified zero-vector is used.
Its shape is ``(S, B, N)`` for uni-directional RNN
and ``(2S, B, N)`` for bi-directional RNN where ``S`` is
the number of layers and is equal to ``n_layers``, ``B`` is
the mini-batch size, and ``N`` is the dimension of
the hidden units.
xs (list of :class:`~chainer.Variable`): List of input sequences.
Each element ``xs[i]`` is a :class:`chainer.Variable` holding
a sequence. Its shape is ``(L_i, I)``, where ``L_i`` is the
length of a sequence for batch ``i``, and ``I`` is the size of
the input and is equal to ``in_size``.
Returns:
tuple: This function returns a tuple containing two elements,
``hy`` and ``ys``.
            - ``hy`` is the updated hidden states whose shape is the same
              as ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[i]`` holds hidden states of the last layer corresponding
to an input ``xs[i]``. Its shape is ``(L_i, N)`` for
uni-directional RNN and ``(L_i, 2N)`` for bi-directional RNN
where ``L_i`` is the length of a sequence for batch ``i``,
and ``N`` is size of hidden units.
"""
(hy,), ys = self._call([hx], xs, **kwargs)
return hy, ys
def _call(self, hs, xs, **kwargs):
"""Calls RNN function.
Args:
            hs (list of ~chainer.Variable or None): List of hidden states.
Its length depends on its implementation.
If ``None`` is specified zero-vector is used.
xs (list of ~chainer.Variable): List of input sequences.
Each element ``xs[i]`` is a :class:`chainer.Variable` holding
a sequence.
        Returns:
            tuple: Tuple of the updated hidden states ``hys`` and the
            output sequences ``ys``.
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
assert isinstance(xs, (list, tuple))
indices = argsort_list_descent(xs)
xs = permutate_list(xs, indices, inv=False)
hxs = []
for hx in hs:
if hx is None:
hx = self.init_hx(xs)
else:
hx = permutate.permutate(hx, indices, axis=1, inv=False)
hxs.append(hx)
trans_x = transpose_sequence.transpose_sequence(xs)
args = [self.n_layers, self.dropout] + hxs + \
[self.ws, self.bs, trans_x]
result = self.rnn(*args)
hys = [permutate.permutate(h, indices, axis=1, inv=True)
for h in result[:-1]]
trans_y = result[-1]
ys = transpose_sequence.transpose_sequence(trans_y)
ys = permutate_list(ys, indices, inv=True)
return hys, ys
class NStepRNNTanh(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Uni-directional RNN for sequences.
    This link is a stacked version of Uni-directional RNN for sequences.
    Note that the activation function is ``tanh``.
    It calculates the hidden states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_rnn`, this link automatically
    sorts inputs in descending order by length, and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_rnn`
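    .. admonition:: Example
        A minimal shape check:
        >>> rnn_link = L.NStepRNNTanh(
        ...     n_layers=1, in_size=3, out_size=4, dropout=0.0)
        >>> xs = [np.zeros((5, 3), dtype=np.float32)]
        >>> hy, ys = rnn_link(None, xs)
        >>> hy.shape
        (1, 1, 4)
        >>> ys[0].shape
        (5, 4)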
"""
n_weights = 2
use_bi_direction = False
def rnn(self, *args):
return rnn.n_step_rnn(*args, activation='tanh')
@property
def n_cells(self):
return 1
class NStepRNNReLU(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Uni-directional RNN for sequences.
    This link is a stacked version of Uni-directional RNN for sequences.
    Note that the activation function is ``relu``.
    It calculates the hidden states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_rnn`, this link automatically
    sorts inputs in descending order by length, and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_rnn`
"""
n_weights = 2
use_bi_direction = False
def rnn(self, *args):
return rnn.n_step_rnn(*args, activation='relu')
@property
def n_cells(self):
return 1
class NStepBiRNNTanh(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Bi-directional RNN for sequences.
    This link is a stacked version of Bi-directional RNN for sequences.
    Note that the activation function is ``tanh``.
    It calculates the hidden states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_birnn`, this link automatically
    sorts inputs in descending order by length, and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_birnn`
"""
n_weights = 2
use_bi_direction = True
def rnn(self, *args):
return rnn.n_step_birnn(*args, activation='tanh')
@property
def n_cells(self):
return 1
class NStepBiRNNReLU(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Bi-directional RNN for sequences.
    This link is a stacked version of Bi-directional RNN for sequences.
    Note that the activation function is ``relu``.
    It calculates the hidden states of all layers at end-of-string,
    and all hidden states of the last layer for each time step.
    Unlike :func:`chainer.functions.n_step_birnn`, this link automatically
    sorts inputs in descending order by length, and transposes the sequences.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_birnn`
"""
n_weights = 2
use_bi_direction = True
def rnn(self, *args):
return rnn.n_step_birnn(*args, activation='relu')
@property
def n_cells(self):
return 1
| 12,967
| 33.954178
| 79
|
py
|
chainer
|
chainer-master/chainer/links/rnn/peephole.py
|
import chainer
from chainer.functions.activation import sigmoid
from chainer.functions.activation import tanh
from chainer.functions.array import reshape
from chainer.functions.array import split_axis
from chainer import link
from chainer.links.connection import linear
from chainer import variable
class StatefulPeepholeLSTM(link.Chain):
"""Fully-connected LSTM layer with peephole connections.
This is a fully-connected LSTM layer with peephole connections as a chain.
Unlike the :class:`~chainer.links.LSTM` link, this chain holds ``peep_i``,
``peep_f`` and ``peep_o`` as child links besides ``upward`` and
``lateral``.
    Given an input vector :math:`x`, the peephole LSTM returns the next
    hidden vector :math:`h'` defined as
.. math::
a &=& \\tanh(upward x + lateral h), \\\\
i &=& \\sigma(upward x + lateral h + peep_i c), \\\\
f &=& \\sigma(upward x + lateral h + peep_f c), \\\\
c' &=& a \\odot i + f \\odot c, \\\\
o &=& \\sigma(upward x + lateral h + peep_o c'), \\\\
        h' &=& o \\odot \\tanh(c'),
where :math:`\\sigma` is the sigmoid function, :math:`\\odot` is the
element-wise product, :math:`c` is the current cell state, :math:`c'`
is the next cell state and :math:`h` is the current hidden vector.
Args:
in_size(int): Dimension of the input vector :math:`x`.
out_size(int): Dimension of the hidden vector :math:`h`.
Attributes:
upward (~chainer.links.Linear): Linear layer of upward connections.
lateral (~chainer.links.Linear): Linear layer of lateral connections.
peep_i (~chainer.links.Linear): Linear layer of peephole connections
to the input gate.
peep_f (~chainer.links.Linear): Linear layer of peephole connections
to the forget gate.
peep_o (~chainer.links.Linear): Linear layer of peephole connections
to the output gate.
c (~chainer.Variable): Cell states of LSTM units.
h (~chainer.Variable): Output at the current time step.
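    .. admonition:: Example
        A minimal shape check; the states are created lazily on the first
        call:
        >>> l = L.StatefulPeepholeLSTM(10, 20)
        >>> x = np.zeros((1, 10), dtype=np.float32)
        >>> h_new = l(x)
        >>> h_new.shape
        (1, 20)
        >>> l.c.shape
        (1, 20)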
"""
def __init__(self, in_size, out_size):
super(StatefulPeepholeLSTM, self).__init__()
self.state_size = out_size
self.reset_state()
with self.init_scope():
self.upward = linear.Linear(in_size, 4 * out_size)
self.lateral = linear.Linear(out_size, 4 * out_size, nobias=True)
self.peep_i = linear.Linear(out_size, out_size, nobias=True)
self.peep_f = linear.Linear(out_size, out_size, nobias=True)
self.peep_o = linear.Linear(out_size, out_size, nobias=True)
def device_resident_accept(self, visitor):
super(StatefulPeepholeLSTM, self).device_resident_accept(visitor)
if self.c is not None:
visitor.visit_variable(self.c)
if self.h is not None:
visitor.visit_variable(self.h)
def reset_state(self):
"""Resets the internal states.
It sets ``None`` to the :attr:`c` and :attr:`h` attributes.
"""
self.c = self.h = None
def forward(self, x):
"""Updates the internal state and returns the LSTM outputs.
Args:
x (~chainer.Variable): A new batch from the input sequence.
Returns:
~chainer.Variable: Outputs of updated LSTM units.
"""
lstm_in = self.upward(x)
if self.h is not None:
lstm_in += self.lateral(self.h)
if self.c is None:
xp = self.xp
with chainer.using_device(self.device):
self.c = variable.Variable(
xp.zeros((len(x), self.state_size), dtype=x.dtype))
lstm_in = reshape.reshape(
lstm_in, (len(lstm_in), lstm_in.shape[1] // 4, 4))
a, i, f, o = split_axis.split_axis(lstm_in, 4, 2)
a = reshape.reshape(a, a.shape[:2])
i = reshape.reshape(i, i.shape[:2])
f = reshape.reshape(f, f.shape[:2])
o = reshape.reshape(o, o.shape[:2])
peep_in_i = self.peep_i(self.c)
peep_in_f = self.peep_f(self.c)
a = tanh.tanh(a)
i = sigmoid.sigmoid(i + peep_in_i)
f = sigmoid.sigmoid(f + peep_in_f)
self.c = a * i + f * self.c
peep_in_o = self.peep_o(self.c)
o = sigmoid.sigmoid(o + peep_in_o)
self.h = o * tanh.tanh(self.c)
return self.h
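# A minimal usage sketch of StatefulPeepholeLSTM, assuming illustrative
# sizes (input 4, output 3, minibatch 2). The link is stateful, so c and
# h persist across calls until reset_state() is invoked.
import numpy as np

lstm = StatefulPeepholeLSTM(in_size=4, out_size=3)
lstm.reset_state()
for _ in range(5):                              # a 5-step sequence
    x = np.zeros((2, 4), dtype=np.float32)      # one time step
    h = lstm(x)                                 # h.shape == (2, 3)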
# [end of file: 4,445 chars, avg line length 37.327586, max line length 78, py]

# ===== chainer-master/chainer/links/activation/simplified_dropconnect.py =====
import numpy
from chainer.functions.noise import simplified_dropconnect
from chainer import initializers
from chainer import link
from chainer import variable
class SimplifiedDropconnect(link.Link):
"""Fully-connected layer with simplified dropconnect regularization.
Notice:
    This implementation cannot be used to reproduce the paper.
    There is a difference between the current implementation and the
    original one.
    The original version samples from a Gaussian distribution before
    applying the activation function, whereas the current implementation
    averages before the activation.
Args:
in_size (int): Dimension of input vectors. If ``None``, parameter
initialization will be deferred until the first forward data pass
at which time the size will be determined.
out_size (int): Dimension of output vectors.
nobias (bool): If ``True``, then this link does not use the bias term.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 3.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 2.
Attributes:
W (~chainer.Variable): Weight parameter.
b (~chainer.Variable): Bias parameter.
.. seealso:: :func:`~chainer.functions.simplified_dropconnect`
.. seealso::
        Wan, L., Zeiler, M., Zhang, S., LeCun, Y., & Fergus, R. (2013).
        Regularization of Neural Networks using DropConnect.
        International Conference on Machine Learning.
`URL <https://cs.nyu.edu/~wanli/dropc/>`_
"""
def __init__(self, in_size, out_size, ratio=.5, nobias=False,
initialW=None, initial_bias=None):
super(SimplifiedDropconnect, self).__init__()
self.out_size = out_size
self.ratio = ratio
if initialW is None:
initialW = initializers.HeNormal(1. / numpy.sqrt(2))
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer)
if in_size is not None:
self._initialize_params(in_size)
if nobias:
self.b = None
else:
if initial_bias is None:
initial_bias = initializers.Constant(0)
bias_initializer = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(bias_initializer, out_size)
def _initialize_params(self, in_size):
self.W.initialize((self.out_size, in_size))
def forward(self, x, train=True, mask=None, use_batchwise_mask=True):
"""Applies the simplified dropconnect layer.
Args:
x (chainer.Variable or :ref:`ndarray`):
Batch of input vectors. Its first dimension ``n`` is assumed
to be the *minibatch dimension*.
train (bool):
If ``True``, executes simplified dropconnect.
Otherwise, simplified dropconnect link works as a linear unit.
mask (None or chainer.Variable or :ref:`ndarray`):
                If ``None``, a randomized simplified dropconnect mask is
                generated. Otherwise, the mask must be an ``(n, M, N)`` or
                ``(M, N)`` shaped array, and ``use_batchwise_mask`` is ignored.
                The main purpose of this option is debugging; the given
                array is used as the dropconnect mask.
use_batchwise_mask (bool):
If ``True``, dropped connections depend on each sample in
mini-batch.
Returns:
~chainer.Variable: Output of the simplified dropconnect layer.
"""
if self.W.array is None:
self._initialize_params(x.size // len(x))
if mask is not None and 'mask' not in self.__dict__:
self.add_persistent('mask', mask)
return simplified_dropconnect.simplified_dropconnect(
x, self.W, self.b, self.ratio, train, mask, use_batchwise_mask)
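# A minimal usage sketch of SimplifiedDropconnect, assuming illustrative
# sizes (in 5, out 3). With train=False the link reduces to a plain
# linear unit, which is the usual evaluation-time behaviour.
import numpy as np

layer = SimplifiedDropconnect(5, 3, ratio=0.5)
x = np.ones((2, 5), dtype=np.float32)
y_train = layer(x, train=True)    # connections dropped at ratio 0.5
y_eval = layer(x, train=False)    # deterministic output, shape (2, 3)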
# [end of file: 4,271 chars, avg line length 40.076923, max line length 78, py]

# ===== chainer-master/chainer/links/activation/maxout.py =====
import numpy
import chainer
from chainer.functions.activation import maxout
from chainer import initializer
from chainer import link
from chainer.links.connection import linear
class Maxout(link.Chain):
"""Fully-connected maxout layer.
Let ``M``, ``P`` and ``N`` be an input dimension, a pool size,
and an output dimension, respectively.
For an input vector :math:`x` of size ``M``, it computes
.. math::
Y_{i} = \\mathrm{max}_{j} (W_{ij\\cdot}x + b_{ij}).
    Here :math:`W` is a weight tensor of shape ``(N, P, M)``,
    :math:`b` an optional bias vector of shape ``(N, P)``
    and :math:`W_{ij\\cdot}` is a sub-vector extracted from
    :math:`W` by fixing the first and second dimensions to
    :math:`i` and :math:`j`, respectively.
    The minibatch dimension is omitted in the above equation.
    As for the actual implementation, this chain has a
    Linear link with an ``(N * P, M)`` weight matrix and
    an optional ``N * P`` dimensional bias vector.
Args:
in_size (int): Dimension of input vectors.
out_size (int): Dimension of output vectors.
        pool_size (int): Pool size :math:`P`, i.e. the number of linear
            units pooled per output unit.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 3.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias is omitted.
When it is :class:`numpy.ndarray`, its ``ndim`` should be 2.
Attributes:
linear (~chainer.Link): The Linear link that performs
affine transformation.
.. seealso:: :func:`~chainer.functions.maxout`
.. seealso::
Goodfellow, I., Warde-farley, D., Mirza, M.,
Courville, A., & Bengio, Y. (2013).
Maxout Networks. In Proceedings of the 30th International
Conference on Machine Learning (ICML-13) (pp. 1319-1327).
`URL <http://jmlr.org/proceedings/papers/v28/goodfellow13.html>`_
"""
def __init__(self, in_size, out_size, pool_size,
initialW=None, initial_bias=0):
super(Maxout, self).__init__()
linear_out_size = out_size * pool_size
if initialW is None or \
numpy.isscalar(initialW) or \
isinstance(initialW, initializer.Initializer):
pass
elif isinstance(initialW, chainer.get_array_types()):
if initialW.ndim != 3:
raise ValueError('initialW.ndim should be 3')
initialW = initialW.reshape(linear_out_size, in_size)
elif callable(initialW):
initialW_orig = initialW
def initialW(array):
array.shape = (out_size, pool_size, in_size)
initialW_orig(array)
array.shape = (linear_out_size, in_size)
if initial_bias is None or \
numpy.isscalar(initial_bias) or \
isinstance(initial_bias, initializer.Initializer):
pass
elif isinstance(initial_bias, chainer.get_array_types()):
if initial_bias.ndim != 2:
raise ValueError('initial_bias.ndim should be 2')
initial_bias = initial_bias.reshape(linear_out_size)
elif callable(initial_bias):
initial_bias_orig = initial_bias
def initial_bias(array):
array.shape = (out_size, pool_size)
initial_bias_orig(array)
array.shape = linear_out_size,
with self.init_scope():
self.linear = linear.Linear(
in_size, linear_out_size,
nobias=initial_bias is None, initialW=initialW,
initial_bias=initial_bias)
self.out_size = out_size
self.pool_size = pool_size
def forward(self, x):
"""Applies the maxout layer.
Args:
x (~chainer.Variable): Batch of input vectors.
Returns:
~chainer.Variable: Output of the maxout layer.
"""
y = self.linear(x)
return maxout.maxout(y, self.pool_size)
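# A minimal usage sketch of Maxout, assuming illustrative sizes (input
# M=4, output N=3, pool size P=2). Internally an (N*P)-dimensional
# affine map is computed and the maximum is taken over each pool of P.
import numpy as np

m = Maxout(in_size=4, out_size=3, pool_size=2)
x = np.zeros((5, 4), dtype=np.float32)   # minibatch of 5
y = m(x)                                 # y.shape == (5, 3)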
# [end of file: 4,114 chars, avg line length 34.782609, max line length 74, py]

# ===== chainer-master/chainer/links/activation/swish.py =====
from chainer.functions.activation import swish
from chainer import initializers
from chainer import link
from chainer import variable
class Swish(link.Link):
"""Swish activation function as a link.
Args:
beta_shape (tuple of ints or None): Shape of the parameter variable
:math:`\\beta`. If ``None``, parameter initialization will be
deferred until the first forward data pass at which time the shape
will be determined.
beta_init (float): Initial value of the parameter variable
:math:`\\beta`.
See the paper for details: `Searching for Activation Functions
<https://arxiv.org/abs/1710.05941>`_
To try Swish instead of ReLU, replace ``F.relu`` with individual ``Swish``
links registered to the model. For example, the model defined in the
`MNIST example
<https://github.com/chainer/chainer/tree/master/examples/mnist/train_mnist.py>`_
can be rewritten as follows.
ReLU version (original)::
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
self.l1 = L.Linear(None, n_units)
self.l2 = L.Linear(None, n_units)
self.l3 = L.Linear(None, n_out)
def forward(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
Swish version::
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
self.l1 = L.Linear(None, n_units)
self.s1 = L.Swish(None)
self.l2 = L.Linear(None, n_units)
self.s2 = L.Swish(None)
self.l3 = L.Linear(None, n_out)
def forward(self, x):
h1 = self.s1(self.l1(x))
h2 = self.s2(self.l2(h1))
return self.l3(h2)
.. seealso::
See :func:`chainer.functions.swish` for the definition of Swish
activation function.
Attributes:
beta (~chainer.Parameter): Parameter variable :math:`\\beta`.
"""
def __init__(self, beta_shape, beta_init=1.0):
super(Swish, self).__init__()
with self.init_scope():
if beta_shape is not None:
self.beta = variable.Parameter(beta_init, beta_shape)
else:
beta_init = initializers.Constant(beta_init)
self.beta = variable.Parameter(beta_init)
def forward(self, x):
"""Applies the Swish activation function.
Args:
x (~chainer.Variable): Input variable.
Returns:
~chainer.Variable: Output of the Swish activation function.
"""
if self.beta.array is None:
self.beta.initialize(x.shape[1:])
return swish.swish(x, self.beta)
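# A minimal usage sketch of Swish, assuming an illustrative input of
# shape (2, 7). Passing beta_shape=None defers initialization until the
# first call, at which point beta takes the shape x.shape[1:] == (7,).
import numpy as np

act = Swish(None)
x = np.zeros((2, 7), dtype=np.float32)
y = act(x)   # beta is initialized here with shape (7,)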
# [end of file: 3,025 chars, avg line length 30.852632, max line length 84, py]

# ===== chainer-master/chainer/links/activation/__init__.py =====
# (empty file: 0 chars, py)

# ===== chainer-master/chainer/links/activation/prelu.py =====
from chainer.functions.activation import prelu
from chainer import link
from chainer import variable
class PReLU(link.Link):
"""Parametric ReLU function as a link.
Args:
shape (tuple of ints): Shape of the parameter array.
init (float): Initial parameter value.
See the paper for details: `Delving Deep into Rectifiers: Surpassing
Human-Level Performance on ImageNet Classification
<https://arxiv.org/abs/1502.01852>`_.
To try PReLU instead of ReLU, replace ``F.relu`` with individual ``PReLU``
links registered to the model. For example, the model defined in the
`MNIST example
<https://github.com/chainer/chainer/tree/master/examples/mnist/train_mnist.py>`_
can be rewritten as follows.
ReLU version (original)::
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
self.l1 = L.Linear(None, n_units)
self.l2 = L.Linear(None, n_units)
self.l3 = L.Linear(None, n_out)
def forward(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
PReLU version::
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
self.l1 = L.Linear(None, n_units)
self.a1 = L.PReLU()
self.l2 = L.Linear(None, n_units)
self.a2 = L.PReLU()
self.l3 = L.Linear(None, n_out)
def forward(self, x):
h1 = self.a1(self.l1(x))
h2 = self.a2(self.l2(h1))
return self.l3(h2)
.. seealso:: :func:`chainer.functions.prelu`
Attributes:
W (~chainer.Parameter): Coefficient of parametric ReLU.
"""
def __init__(self, shape=(), init=0.25):
super(PReLU, self).__init__()
with self.init_scope():
self.W = variable.Parameter(init, shape)
def forward(self, x):
"""Applies the parametric ReLU activation function.
Args:
x (~chainer.Variable): Input variable.
Returns:
~chainer.Variable: Output of the parametric ReLU function.
"""
return prelu.prelu(x, self.W)
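# A minimal usage sketch of PReLU, assuming illustrative input shapes.
# The default shape=() learns a single slope shared by every unit; pass
# e.g. shape=(7,) for a per-channel slope.
import numpy as np

act = PReLU()
x = np.linspace(-1, 1, 14, dtype=np.float32).reshape(2, 7)
y = act(x)   # negative inputs are scaled by W (initialized to 0.25)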
# [end of file: 2,445 chars, avg line length 29.197531, max line length 84, py]

# ===== chainer-master/chainer/links/connection/inceptionbn.py =====
import chainer
from chainer.functions.activation import relu
from chainer.functions.array import concat
from chainer.functions.pooling import average_pooling_2d
from chainer.functions.pooling import max_pooling_nd
from chainer import link
from chainer.links.connection import convolution_2d
from chainer.links.normalization import batch_normalization
class InceptionBN(link.Chain):
"""Inception module of the new GoogLeNet with BatchNormalization.
    This chain acts like :class:`Inception`, except that InceptionBN applies
    :class:`BatchNormalization` on top of each convolution, replaces the 5x5
    convolution path with two consecutive 3x3 convolutions, and makes the
    pooling method configurable.
See: `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/abs/1502.03167>`_.
Args:
in_channels (int or None): Number of channels of input arrays.
out1 (int): Output size of the 1x1 convolution path.
proj3 (int): Projection size of the single 3x3 convolution path.
out3 (int): Output size of the single 3x3 convolution path.
proj33 (int): Projection size of the double 3x3 convolutions path.
out33 (int): Output size of the double 3x3 convolutions path.
pooltype (str): Pooling type. It must be either ``'max'`` or ``'avg'``.
proj_pool (int or None): Projection size in the pooling path. If
``None``, no projection is done.
stride (int): Stride parameter of the last convolution of each path.
conv_init (:ref:`initializer <initializer>`): Initializer to
initialize the convolution matrix weights.
When it is :class:`numpy.ndarray`, its ``ndim`` should be 4.
dtype (numpy.dtype): Type to use in
:class:`BatchNormalization`.
.. seealso:: :class:`Inception`
"""
def __init__(self, in_channels, out1, proj3, out3, proj33, out33,
pooltype, proj_pool=None, stride=1, conv_init=None,
dtype=None):
super(InceptionBN, self).__init__()
self.out1 = out1
self.proj_pool = proj_pool
self.stride = stride
self.pooltype = pooltype
if pooltype != 'max' and pooltype != 'avg':
raise NotImplementedError()
dtype = chainer.get_dtype(dtype)
with self.init_scope():
self.proj3 = convolution_2d.Convolution2D(
in_channels, proj3, 1, nobias=True, initialW=conv_init)
self.conv3 = convolution_2d.Convolution2D(
proj3, out3, 3, pad=1, stride=stride, nobias=True,
initialW=conv_init)
self.proj33 = convolution_2d.Convolution2D(
in_channels, proj33, 1, nobias=True, initialW=conv_init)
self.conv33a = convolution_2d.Convolution2D(
proj33, out33, 3, pad=1, nobias=True, initialW=conv_init)
self.conv33b = convolution_2d.Convolution2D(
out33, out33, 3, pad=1, stride=stride, nobias=True,
initialW=conv_init)
self.proj3n = batch_normalization.BatchNormalization(
proj3, dtype=dtype)
self.conv3n = batch_normalization.BatchNormalization(
out3, dtype=dtype)
self.proj33n = batch_normalization.BatchNormalization(
proj33, dtype=dtype)
self.conv33an = batch_normalization.BatchNormalization(
out33, dtype=dtype)
self.conv33bn = batch_normalization.BatchNormalization(
out33, dtype=dtype)
if out1 > 0:
assert stride == 1
assert proj_pool is not None
self.conv1 = convolution_2d.Convolution2D(
in_channels, out1, 1, stride=stride, nobias=True,
initialW=conv_init)
self.conv1n = batch_normalization.BatchNormalization(
out1, dtype=dtype)
if proj_pool is not None:
self.poolp = convolution_2d.Convolution2D(
in_channels, proj_pool, 1, nobias=True, initialW=conv_init)
self.poolpn = batch_normalization.BatchNormalization(
proj_pool, dtype=dtype)
def forward(self, x):
outs = []
if self.out1 > 0:
h1 = self.conv1(x)
h1 = self.conv1n(h1)
h1 = relu.relu(h1)
outs.append(h1)
h3 = relu.relu(self.proj3n(self.proj3(x)))
h3 = relu.relu(self.conv3n(self.conv3(h3)))
outs.append(h3)
h33 = relu.relu(self.proj33n(self.proj33(x)))
h33 = relu.relu(self.conv33an(self.conv33a(h33)))
h33 = relu.relu(self.conv33bn(self.conv33b(h33)))
outs.append(h33)
if self.pooltype == 'max':
p = max_pooling_nd.max_pooling_2d(x, 3, stride=self.stride, pad=1,
cover_all=False)
else:
p = average_pooling_2d.average_pooling_2d(x, 3, stride=self.stride,
pad=1)
if self.proj_pool is not None:
p = relu.relu(self.poolpn(self.poolp(p)))
outs.append(p)
y = concat.concat(outs, axis=1)
return y
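# A minimal usage sketch of InceptionBN, assuming illustrative channel
# sizes. With these arguments the output concatenates
# out1 + out3 + out33 + proj_pool = 32 channels; spatial size is kept.
import numpy as np

inc = InceptionBN(in_channels=16, out1=8, proj3=4, out3=8,
                  proj33=4, out33=8, pooltype='max', proj_pool=8)
x = np.zeros((1, 16, 24, 24), dtype=np.float32)
y = inc(x)   # y.shape == (1, 32, 24, 24)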
# [end of file: 5,325 chars, avg line length 41.608, max line length 79, py]

# ===== chainer-master/chainer/links/connection/highway.py =====
from chainer.functions.activation import relu
from chainer.functions.activation import sigmoid
from chainer import link
from chainer.links.connection import linear
class Highway(link.Chain):
"""Highway module.
    In a highway network, two gates are added to the ordinary non-linear
    transformation (:math:`H(x) = activate(W_h x + b_h)`).
    One gate is the transform gate :math:`T(x) = \\sigma(W_t x + b_t)`, and the
    other is the carry gate :math:`C(x)`.
    For simplicity, the authors define :math:`C = 1 - T`.
Highway module returns :math:`y` defined as
.. math::
y = activate(W_h x + b_h) \\odot \\sigma(W_t x + b_t) +
x \\odot(1 - \\sigma(W_t x + b_t))
The output array has the same spatial size as the input. In order to
satisfy this, :math:`W_h` and :math:`W_t` must be square matrices.
Args:
in_out_size (int): Dimension of input and output vectors.
nobias (bool): If ``True``, then this function does not use the bias.
        activate: Activation function applied to the plain transformation
            (:math:`\\tanh` is also available).
init_Wh (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 2.
init_bh (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
init_Wt (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 2.
init_bt (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
            A negative value is recommended by the authors of the paper
            (e.g. -1, -3, ...).
See:
`Highway Networks <https://arxiv.org/abs/1505.00387>`_.
"""
def __init__(self, in_out_size, nobias=False, activate=relu.relu,
init_Wh=None, init_Wt=None, init_bh=None, init_bt=-1):
super(Highway, self).__init__()
self.activate = activate
with self.init_scope():
self.plain = linear.Linear(
in_out_size, in_out_size, nobias=nobias,
initialW=init_Wh, initial_bias=init_bh)
self.transform = linear.Linear(
in_out_size, in_out_size, nobias=nobias,
initialW=init_Wt, initial_bias=init_bt)
def forward(self, x):
"""Computes the output of the Highway module.
Args:
x (~chainer.Variable): Input variable.
Returns:
~chainer.Variable: Output variable. Its array has the same spatial
size and the same minibatch size as the input array.
"""
out_plain = self.activate(self.plain(x))
out_transform = sigmoid.sigmoid(self.transform(x))
y = out_plain * out_transform + x * (1 - out_transform)
return y
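# A minimal usage sketch of Highway, assuming an illustrative size of 6.
# Input and output dimensions must match because W_h and W_t are square.
import numpy as np

hw = Highway(in_out_size=6)
x = np.zeros((3, 6), dtype=np.float32)
y = hw(x)   # y.shape == (3, 6)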
# [end of file: 3,146 chars, avg line length 40.407895, max line length 79, py]

# ===== chainer-master/chainer/links/connection/bilinear.py =====
import numpy
from chainer.backends import cuda
from chainer.functions.connection import bilinear
from chainer import initializers
from chainer import link
from chainer import variable
class Bilinear(link.Link):
"""Bilinear layer that performs tensor multiplication.
Bilinear is a primitive link that wraps the
:func:`~chainer.functions.bilinear` functions. It holds parameters ``W``,
``V1``, ``V2``, and ``b`` corresponding to the arguments of
:func:`~chainer.functions.bilinear`.
Args:
left_size (int): Dimension of input vector :math:`e^1` (:math:`J`)
right_size (int): Dimension of input vector :math:`e^2` (:math:`K`)
out_size (int): Dimension of output vector :math:`y` (:math:`L`)
nobias (bool): If ``True``, parameters ``V1``, ``V2``, and ``b`` are
omitted.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 3.
initial_bias (tuple of :ref:`initializer <initializer>`):
Initial values of :math:`V^1`, :math:`V^2` and
:math:`b`. The length of this argument must be 3.
Each element of this tuple must have the shapes of
``(left_size, out_size)``, ``(right_size, out_size)``, and
``(out_size,)``, respectively if they are :class:`numpy.ndarray`.
If ``None``, :math:`V^1` and :math:`V^2` are initialized
by the default initializer and :math:`b` is set to :math:`0`.
.. seealso:: See :func:`chainer.functions.bilinear` for details.
Attributes:
W (~chainer.Variable): Bilinear weight parameter.
V1 (~chainer.Variable): Linear weight parameter for the first argument.
V2 (~chainer.Variable): Linear weight parameter for the second
argument.
b (~chainer.Variable): Bias parameter.
"""
def __init__(self, left_size, right_size, out_size, nobias=False,
initialW=None, initial_bias=None):
super(Bilinear, self).__init__()
self.in_sizes = (left_size, right_size)
self.nobias = nobias
# TODO(Kenta OONO): I do not know appropriate way of
# initializing weights in tensor network.
# This initialization is a modification of
# that of Linear function.
with self.init_scope():
shape = (left_size, right_size, out_size)
if isinstance(initialW, (numpy.ndarray, cuda.ndarray)):
assert initialW.shape == shape
self.W = variable.Parameter(
initializers._get_initializer(initialW), shape)
if not self.nobias:
V1_shape = (left_size, out_size)
V2_shape = (right_size, out_size)
b_shape = (out_size,)
if isinstance(initial_bias, tuple):
initialV1, initialV2, initialb = initial_bias
if isinstance(initialV1, (numpy.ndarray, cuda.ndarray)):
assert initialV1.shape == V1_shape
if isinstance(initialV2, (numpy.ndarray, cuda.ndarray)):
assert initialV2.shape == V2_shape
if isinstance(initialb, (numpy.ndarray, cuda.ndarray)):
assert initialb.shape == b_shape
initialV1 = initializers._get_initializer(initialV1)
initialV2 = initializers._get_initializer(initialV2)
initialb = initializers._get_initializer(initialb)
elif initial_bias is None:
initialV1 = initializers._get_initializer(None)
initialV2 = initializers._get_initializer(None)
initialb = 0
else:
raise ValueError('initial_bias must be tuple or None')
self.V1 = variable.Parameter(initialV1, V1_shape)
self.V2 = variable.Parameter(initialV2, V2_shape)
self.b = variable.Parameter(initialb, b_shape)
def forward(self, e1, e2):
"""Applies the bilinear function to inputs and the internal parameters.
Args:
e1 (~chainer.Variable): Left input.
e2 (~chainer.Variable): Right input.
Returns:
~chainer.Variable: Output variable.
"""
if self.nobias:
return bilinear.bilinear(e1, e2, self.W)
else:
return bilinear.bilinear(e1, e2, self.W, self.V1, self.V2, self.b)
def zero_grads(self):
# Left for backward compatibility
self.zerograds()
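# A minimal usage sketch of Bilinear, assuming illustrative sizes
# (J=3, K=4, L=5). Both inputs must share the minibatch dimension.
import numpy as np

bl = Bilinear(left_size=3, right_size=4, out_size=5)
e1 = np.zeros((2, 3), dtype=np.float32)
e2 = np.zeros((2, 4), dtype=np.float32)
y = bl(e1, e2)   # y.shape == (2, 5)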
# [end of file: 4,659 chars, avg line length 40.981982, max line length 79, py]

# ===== chainer-master/chainer/links/connection/inception.py =====
from chainer.functions.activation import relu
from chainer.functions.array import concat
from chainer.functions.pooling import max_pooling_nd
from chainer import link
from chainer.links.connection import convolution_2d
class Inception(link.Chain):
"""Inception module of GoogLeNet.
It applies four different functions to the input array and concatenates
their outputs along the channel dimension. Three of them are 2D
convolutions of sizes 1x1, 3x3 and 5x5. Convolution paths of 3x3 and 5x5
sizes have 1x1 convolutions (called projections) ahead of them. The other
path consists of 1x1 convolution (projection) and 3x3 max pooling.
The output array has the same spatial size as the input. In order to
satisfy this, Inception module uses appropriate padding for each
convolution and pooling.
See: `Going Deeper with Convolutions <https://arxiv.org/abs/1409.4842>`_.
Args:
in_channels (int or None): Number of channels of input arrays.
out1 (int): Output size of 1x1 convolution path.
proj3 (int): Projection size of 3x3 convolution path.
out3 (int): Output size of 3x3 convolution path.
proj5 (int): Projection size of 5x5 convolution path.
out5 (int): Output size of 5x5 convolution path.
proj_pool (int): Projection size of max pooling path.
conv_init (:ref:`initializer <initializer>`): Initializer to
initialize the convolution matrix weights.
When it is :class:`numpy.ndarray`, its ``ndim`` should be 4.
bias_init (:ref:`initializer <initializer>`): Initializer to
initialize the convolution matrix weights.
When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
"""
def __init__(self, in_channels, out1, proj3, out3, proj5, out5, proj_pool,
conv_init=None, bias_init=None):
super(Inception, self).__init__()
with self.init_scope():
self.conv1 = convolution_2d.Convolution2D(
in_channels, out1, 1, initialW=conv_init,
initial_bias=bias_init)
self.proj3 = convolution_2d.Convolution2D(
in_channels, proj3, 1, initialW=conv_init,
initial_bias=bias_init)
self.conv3 = convolution_2d.Convolution2D(
proj3, out3, 3, pad=1, initialW=conv_init,
initial_bias=bias_init)
self.proj5 = convolution_2d.Convolution2D(
in_channels, proj5, 1, initialW=conv_init,
initial_bias=bias_init)
self.conv5 = convolution_2d.Convolution2D(
proj5, out5, 5, pad=2, initialW=conv_init,
initial_bias=bias_init)
self.projp = convolution_2d.Convolution2D(
in_channels, proj_pool, 1, initialW=conv_init,
initial_bias=bias_init)
def forward(self, x):
"""Computes the output of the Inception module.
Args:
x (~chainer.Variable): Input variable.
Returns:
~chainer.Variable: Output variable. Its array has the same spatial
size and the same minibatch size as the input array. The channel
dimension has size ``out1 + out3 + out5 + proj_pool``.
"""
out1 = self.conv1(x)
out3 = self.conv3(relu.relu(self.proj3(x)))
out5 = self.conv5(relu.relu(self.proj5(x)))
pool = self.projp(max_pooling_nd.max_pooling_2d(
x, 3, stride=1, pad=1))
y = relu.relu(concat.concat((out1, out3, out5, pool), axis=1))
return y
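# A minimal usage sketch of Inception, assuming illustrative channel
# sizes. The output concatenates out1 + out3 + out5 + proj_pool = 32
# channels while the spatial size is preserved.
import numpy as np

inc = Inception(in_channels=16, out1=8, proj3=4, out3=8,
                proj5=4, out5=8, proj_pool=8)
x = np.zeros((1, 16, 24, 24), dtype=np.float32)
y = inc(x)   # y.shape == (1, 32, 24, 24)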
# [end of file: 3,601 chars, avg line length 42.39759, max line length 78, py]

# ===== chainer-master/chainer/links/connection/scale.py =====
import chainer
from chainer.functions.math import scale
from chainer import link
from chainer.links.connection import bias
from chainer import variable
class Scale(link.Chain):
"""Broadcasted elementwise product with learnable parameters.
    Computes an elementwise product as the :func:`~chainer.functions.scale`
    function does, except that its second input is a learnable weight parameter
    :math:`W` held by the link.
Args:
axis (int): The first axis of the first input of
:func:`~chainer.functions.scale` function along which its second
input is applied.
        W_shape (tuple of ints): Shape of the learnable weight parameter. If
            ``None``, this link does not have a learnable weight parameter, so
            an explicit weight needs to be given to its ``forward`` method as
            the second input.
bias_term (bool): Whether to also learn a bias (equivalent to Scale
link + Bias link).
        bias_shape (tuple of ints): Shape of the learnable bias. If ``W_shape``
            is ``None``, this should be given to determine the shape.
            Otherwise, the bias has the same shape ``W_shape`` as the weight
            parameter, and ``bias_shape`` is ignored.
.. seealso:: See :func:`~chainer.functions.scale` for details.
Attributes:
W (~chainer.Parameter): Weight parameter if ``W_shape`` is given.
Otherwise, no W attribute.
bias (~chainer.links.Bias): Bias term if ``bias_term`` is ``True``.
Otherwise, no bias attribute.
"""
def __init__(self, axis=1, W_shape=None, bias_term=False, bias_shape=None):
super(Scale, self).__init__()
self.axis = axis
with self.init_scope():
# Add W parameter and/or bias term.
if W_shape is not None:
self.W = variable.Parameter(1, W_shape)
if bias_term:
self.bias = bias.Bias(axis, W_shape)
else:
if bias_term:
if bias_shape is None:
raise ValueError(
'bias_shape should be given if W is not '
'learnt parameter and bias_term is True.')
self.bias = bias.Bias(axis, bias_shape)
def forward(self, *xs):
"""Applies broadcasted elementwise product.
Args:
xs (list of Variables): Input variables whose length should
be one if the link has a learnable weight parameter, otherwise
should be two.
"""
axis = self.axis
# Case of only one argument where W is a learnt parameter.
if hasattr(self, 'W'):
if chainer.is_debug():
assert len(xs) == 1
x, = xs
W = self.W
z = scale.scale(x, W, axis)
# Case of two arguments where W is given as an argument.
else:
if chainer.is_debug():
assert len(xs) == 2
x, y = xs
z = scale.scale(x, y, axis)
# Forward propagate bias term if given.
if hasattr(self, 'bias'):
return self.bias(z)
else:
return z
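# A minimal usage sketch of Scale, assuming an illustrative input of
# shape (2, 3, 4). With W_shape given, W (initialized to 1) is broadcast
# against x starting at `axis`.
import numpy as np

s = Scale(axis=1, W_shape=(3,))
x = np.ones((2, 3, 4), dtype=np.float32)
y = s(x)   # channel-wise scaling; y.shape == (2, 3, 4)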
# [end of file: 3,229 chars, avg line length 36.126437, max line length 79, py]

# ===== chainer-master/chainer/links/connection/deconvolution_2d.py =====
import numpy
from chainer.backends import cuda
from chainer.functions.connection import deconvolution_2d
from chainer import initializers
from chainer import link
from chainer.utils import argument
from chainer import variable
class Deconvolution2D(link.Link):
"""__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, \
nobias=False, outsize=None, initialW=None, initial_bias=None, *, dilate=1, \
groups=1)
Two dimensional deconvolution function.
This link wraps the :func:`~chainer.functions.deconvolution_2d` function
and holds the filter weight and bias vector as parameters.
    Deconvolution links can use a feature of cuDNN called autotuning, which
    selects the most efficient CNN algorithm for images of fixed size, and
    can provide a significant performance boost for fixed neural nets.
    To enable it, use ``chainer.using_config('autotune', True)``.
Args:
in_channels (int or None): Number of channels of input arrays.
If ``None``, parameter initialization will be deferred until the
first forward data pass at which time the size will be determined.
out_channels (int): Number of channels of output arrays.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
nobias (bool): If ``True``, then this function does not use the bias
term.
outsize (tuple): Expected output size of deconvolutional operation.
It should be pair of height and width :math:`(out_H, out_W)`.
Default value is ``None`` and the outsize is estimated by
input size, stride and pad.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 4.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
dilate (:class:`int` or :class:`tuple` of :class:`int` s):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
groups (int): The number of groups to use grouped deconvolution. The
default is one, where grouped deconvolution is not used.
The filter weight has four dimensions :math:`(c_I, c_O, k_H, k_W)`
which indicate the number of input channels, output channels,
height and width of the kernels, respectively.
The filter weight is initialized with i.i.d. Gaussian random samples, each
of which has zero mean and deviation :math:`\\sqrt{1/(c_I k_H k_W)}` by
default.
The bias vector is of size :math:`c_O`.
    Its elements are initialized by the ``initial_bias`` argument.
    If the ``nobias`` argument is set to ``True``, then this link does not hold
the bias parameter.
The output of this function can be non-deterministic when it uses cuDNN.
If ``chainer.configuration.config.cudnn_deterministic`` is ``True`` and
cuDNN version is >= v3, it forces cuDNN to use a deterministic algorithm.
.. seealso::
See :func:`chainer.functions.deconvolution_2d` for the definition of
two-dimensional convolution.
.. seealso::
See :func:`chainer.links.Convolution2D` for the examples of ways to
give arguments to this link.
.. admonition:: Example
There are several ways to make a Deconvolution2D link.
Let an input vector ``x`` be:
>>> x = np.arange(1 * 3 * 10 * 10, dtype=np.float32).reshape(
... 1, 3, 10, 10)
1. Give the first three arguments explicitly:
In this case, all the other arguments are set to the default
values.
>>> l = L.Deconvolution2D(3, 7, 4)
>>> y = l(x)
>>> y.shape
(1, 7, 13, 13)
2. Omit ``in_channels`` or fill it with ``None``:
The below two cases are the same.
>>> l = L.Deconvolution2D(7, 4)
>>> y = l(x)
>>> y.shape
(1, 7, 13, 13)
>>> l = L.Deconvolution2D(None, 7, 4)
>>> y = l(x)
>>> y.shape
(1, 7, 13, 13)
When you omit the first argument, you need to specify the other
subsequent arguments from ``stride`` as keyword arguments. So the
below two cases are the same.
>>> l = L.Deconvolution2D(None, 7, 4, 2, 1)
>>> y = l(x)
>>> y.shape
(1, 7, 20, 20)
>>> l = L.Deconvolution2D(7, 4, stride=2, pad=1)
>>> y = l(x)
>>> y.shape
(1, 7, 20, 20)
"""
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
nobias=False, outsize=None, initialW=None, initial_bias=None,
**kwargs):
super(Deconvolution2D, self).__init__()
dilate, groups, = argument.parse_kwargs(
kwargs, ('dilate', 1), ('groups', 1),
deterministic='deterministic argument is not supported anymore. '
'Use chainer.using_config(\'cudnn_deterministic\', value) '
'context where value is either `True` or `False`.')
if ksize is None:
out_channels, ksize, in_channels = in_channels, out_channels, None
self.ksize = ksize
self.stride = _pair(stride)
self.pad = _pair(pad)
self.dilate = _pair(dilate)
self.outsize = (None, None) if outsize is None else outsize
self.out_channels = out_channels
self.groups = int(groups)
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer)
if in_channels is not None:
self._initialize_params(in_channels)
if nobias:
self.b = None
else:
if isinstance(initial_bias, (numpy.ndarray, cuda.ndarray)):
assert initial_bias.shape == (out_channels,)
if initial_bias is None:
initial_bias = 0
bias_initializer = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(bias_initializer, out_channels)
def _initialize_params(self, in_channels):
kh, kw = _pair(self.ksize)
if self.out_channels % self.groups != 0:
raise ValueError('the number of output channels must be '
'divisible by the number of groups')
if in_channels % self.groups != 0:
raise ValueError('the number of input channels must be '
'divisible by the number of groups')
W_shape = (in_channels, int(self.out_channels / self.groups), kh, kw)
self.W.initialize(W_shape)
def forward(self, x):
if self.W.array is None:
self._initialize_params(x.shape[1])
return deconvolution_2d.deconvolution_2d(
x, self.W, self.b, self.stride, self.pad, self.outsize,
dilate=self.dilate, groups=self.groups)
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
# [end of file: 7,614 chars, avg line length 39.078947, max line length 79, py]

# ===== chainer-master/chainer/links/connection/convolution_nd.py =====
from chainer.functions.connection import convolution_nd
from chainer import initializers
from chainer import link
from chainer.utils import argument
from chainer.utils import conv_nd
from chainer import variable
class ConvolutionND(link.Link):
"""N-dimensional convolution layer.
This link wraps the :func:`~chainer.functions.convolution_nd` function and
holds the filter weight and bias vector as parameters.
    Convolution links can use a feature of cuDNN called autotuning, which
    selects the most efficient CNN algorithm for images of fixed size, and
    can provide a significant performance boost for fixed neural nets.
    To enable it, use ``chainer.using_config('autotune', True)``.
Args:
ndim (int): Number of spatial dimensions.
in_channels (int): Number of channels of input arrays.
If ``None``, parameter initialization will be deferred until the
first forward data pass at which time the size will be determined.
out_channels (int): Number of channels of output arrays.
ksize (int or tuple of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints): Stride of filter application.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent.
pad (int or tuple of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
nobias (bool): If ``True``, then this function does not use the bias.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be :math:`n+2` where :math:`n` is
the number of spatial dimensions.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
            zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
``cover_all`` needs to be ``False`` if you want to use cuDNN.
dilate (:class:`int` or :class:`tuple` of :class:`int` s):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d, ..., d)`` are equivalent.
groups (:class:`int`):
The number of groups to use grouped convolution.
The default is one, where grouped convolution is not used.
.. seealso::
See :func:`~chainer.functions.convolution_nd` for the definition of
N-dimensional convolution. See
:func:`~chainer.functions.convolution_2d` for the definition of
two-dimensional convolution.
Attributes:
W (~chainer.Variable): Weight parameter.
b (~chainer.Variable): Bias parameter. If ``initial_bias`` is ``None``,
set to ``None``.
.. admonition:: Example
There are several ways to make a ConvolutionND link.
Let an input vector ``x`` be:
>>> x = np.arange(2 * 5 * 5 * 5, dtype=np.float32).reshape(
... 1, 2, 5, 5, 5)
1. Give the first four arguments explicitly:
>>> l = L.ConvolutionND(3, 2, 7, 4)
>>> y = l(x)
>>> y.shape
(1, 7, 2, 2, 2)
2. Omit ``in_channels`` or fill it with ``None``:
The below two cases are the same.
>>> l = L.ConvolutionND(3, 7, 4)
>>> y = l(x)
>>> y.shape
(1, 7, 2, 2, 2)
>>> l = L.ConvolutionND(3, None, 7, 4)
>>> y = l(x)
>>> y.shape
(1, 7, 2, 2, 2)
When you omit the second argument, you need to specify the other
        subsequent arguments from ``stride`` as keyword arguments. So the
below two cases are the same.
>>> l = L.ConvolutionND(3, 7, 4, stride=1, pad=0)
>>> y = l(x)
>>> y.shape
(1, 7, 2, 2, 2)
>>> l = L.ConvolutionND(3, None, 7, 4, 1, 0)
>>> y = l(x)
>>> y.shape
(1, 7, 2, 2, 2)
"""
def __init__(self, ndim, in_channels, out_channels, ksize=None, stride=1,
pad=0, nobias=False, initialW=None, initial_bias=None,
cover_all=False, dilate=1, groups=1):
super(ConvolutionND, self).__init__()
if ksize is None:
out_channels, ksize, in_channels = \
in_channels, out_channels, None
self.out_channels = out_channels
self.ksize = conv_nd.as_tuple(ksize, ndim)
self.stride = stride
self.pad = pad
self.cover_all = cover_all
self.dilate = conv_nd.as_tuple(dilate, ndim)
self.groups = int(groups)
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer)
if in_channels is not None:
self._initialize_params(in_channels)
if nobias:
self.b = None
else:
if initial_bias is None:
initial_bias = 0
initial_bias = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(initial_bias, out_channels)
def _initialize_params(self, in_channels):
if self.out_channels % self.groups != 0:
raise ValueError('the number of output channels must be'
' divisible by the number of groups')
if in_channels % self.groups != 0:
raise ValueError('the number of input channels must be'
' divisible by the number of groups')
W_shape = (
self.out_channels, int(in_channels / self.groups)) + self.ksize
self.W.initialize(W_shape)
@classmethod
def from_params(cls, W, b=None, stride=1, pad=0, nobias=False, **kwargs):
"""from_params(cls, W, b=None, stride=1, pad=0, \
nobias=False, *, cover_all=False, dilate=1, groups=1)
Initialize a :class:`~chainer.links.ConvolutionND` with given
parameters.
This method uses ``W`` and optional ``b`` to initialize an :math:`N` D
convolution layer.
Args:
W (:class:`~chainer.Variable` or :ref:`ndarray`):
The weight parameter.
b (:class:`~chainer.Variable`, :ref:`ndarray`, or None):
                The bias parameter. The kernel size is inferred from
                ``W.shape``, so ``from_params`` takes no ``ksize`` argument.
stride (int or tuple of ints): Stride of filter application.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent.
pad (int or tuple of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
nobias (bool):
If ``True``, then this function does not use the bias.
cover_all (bool): If ``True``, all spatial locations are convoluted
into some output pixels. It may make the output size larger.
``cover_all`` needs to be ``False`` if you want to use cuDNN.
dilate (:class:`int` or :class:`tuple` of :class:`int` s):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d, ..., d)`` are equivalent.
groups (:class:`int`):
The number of groups to use grouped convolution.
The default is one, where grouped convolution is not used.
"""
# TODO(crcrpar): Support the below conditions.
# - W (and b) of cupy on non-default GPUs like id=1.
# - W (and b) of chainerx on cuda.
cover_all, dilate, groups = argument.parse_kwargs(
kwargs, ('cover_all', False), ('dilate', 1), ('groups', 1))
out_channels, _in_channels, *ksize = W.shape
in_channels = _in_channels * groups
if b is not None:
if out_channels != b.size:
raise ValueError(
'`out_channels` does not match the size of `b`')
link = cls(
len(ksize), in_channels, out_channels, ksize, stride, pad, nobias,
initialW=variable.as_array(W), initial_bias=variable.as_array(b),
cover_all=cover_all, dilate=dilate, groups=groups)
return link
def forward(self, x):
"""Applies N-dimensional convolution layer.
Args:
x (~chainer.Variable): Input image.
Returns:
~chainer.Variable: Output of convolution.
"""
if self.W.array is None:
self._initialize_params(x.shape[1])
return convolution_nd.convolution_nd(
x, self.W, self.b, self.stride, self.pad, cover_all=self.cover_all,
dilate=self.dilate, groups=self.groups)
class Convolution1D(ConvolutionND):
"""1-dimensional convolution layer.
.. note::
This link wraps :class:`~chainer.links.ConvolutionND` by giving 1 to
the first argument ``ndim``, so see the details of the behavior in
the documentation of :class:`~chainer.links.ConvolutionND`.
"""
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
nobias=False, initialW=None, initial_bias=None,
cover_all=False, dilate=1, groups=1):
super(Convolution1D, self).__init__(
1, in_channels, out_channels, ksize, stride, pad, nobias, initialW,
initial_bias, cover_all, dilate, groups)
class Convolution3D(ConvolutionND):
"""3-dimensional convolution layer.
.. note::
This link wraps :class:`~chainer.links.ConvolutionND` by giving 3 to
the first argument ``ndim``, so see the details of the behavior in
the documentation of :class:`~chainer.links.ConvolutionND`.
"""
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
nobias=False, initialW=None, initial_bias=None,
cover_all=False, dilate=1, groups=1):
super(Convolution3D, self).__init__(
3, in_channels, out_channels, ksize, stride, pad, nobias, initialW,
initial_bias, cover_all, dilate, groups)
# [end of file: 10,487 chars, avg line length 40.291339, max line length 79, py]

# ===== chainer-master/chainer/links/connection/linear.py =====
import typing as tp # NOQA
from chainer.functions.connection import linear
from chainer import initializers
from chainer import link
from chainer import types # NOQA
from chainer import utils
from chainer import variable
class Linear(link.Link):
"""Linear layer (a.k.a.\\ fully-connected layer).
This is a link that wraps the :func:`~chainer.functions.linear` function,
and holds a weight matrix ``W`` and optionally a bias vector ``b`` as
parameters.
If ``initialW`` is left to the default value of ``None``, the weight matrix
``W`` is initialized with i.i.d. Gaussian samples, each of which has zero
mean and deviation :math:`\\sqrt{1/\\text{in_size}}`. The bias vector ``b``
    is of size ``out_size``. If ``initial_bias`` is left to the default
    value of ``None``, each element is initialized to zero. If the ``nobias``
argument is set to ``True``, then this link does not hold a bias vector.
Args:
in_size (int or None): Dimension of input vectors. If unspecified or
``None``, parameter initialization will be deferred until the
first forward data pass at which time the size will be determined.
out_size (int): Dimension of output vectors. If only one value is
passed for ``in_size`` and ``out_size``, that value will be used
for the ``out_size`` dimension.
nobias (bool): If ``True``, then this function does not use the bias.
initialW (:ref:`initializer <initializer>`): Initializer to initialize
the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 2. If ``initialW`` is ``None``, then the
weights are initialized with i.i.d. Gaussian samples, each of which
has zero mean and deviation :math:`\\sqrt{1/\\text{in_size}}`.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
.. seealso:: :func:`~chainer.functions.linear`
Attributes:
W (~chainer.Variable): Weight parameter.
b (~chainer.Variable): Bias parameter.
.. admonition:: Example
There are several ways to make a Linear link.
Define an input vector ``x`` as:
>>> x = np.array([[0, 1, 2, 3, 4]], np.float32)
1. Give the first two arguments explicitly:
Those numbers are considered as the input size and the output size.
>>> l = L.Linear(5, 10)
>>> y = l(x)
>>> y.shape
(1, 10)
2. Omit ``in_size`` (give the output size only as the first argument)
or fill it with ``None``:
In this case, the size of second axis of ``x`` is used as the
input size. So the below two cases are the same.
>>> l = L.Linear(10)
>>> y = l(x)
>>> y.shape
(1, 10)
>>> l = L.Linear(None, 10)
>>> y = l(x)
>>> y.shape
(1, 10)
When you omit the first argument, you need to specify the other
subsequent arguments from ``nobias`` as keyword arguments. So the
below two cases are the same.
>>> l = L.Linear(None, 10, False, None, 0)
>>> y = l(x)
>>> y.shape
(1, 10)
>>> l = L.Linear(10, nobias=False, initialW=None, initial_bias=0)
>>> y = l(x)
>>> y.shape
(1, 10)
"""
def __init__(
self,
in_size: tp.Optional[int],
out_size: tp.Optional[int] = None,
nobias: bool = False,
initialW: tp.Optional[types.InitializerSpec] = None,
initial_bias: tp.Optional[types.InitializerSpec] = None
) -> None:
super(Linear, self).__init__()
if out_size is None:
in_size, out_size = None, in_size
self.in_size = in_size
self.out_size = out_size
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer) # type: variable.Variable # NOQA
if in_size is not None:
self._initialize_params(in_size)
if nobias:
self.b = None # type: tp.Optional[variable.Variable]
else:
if initial_bias is None:
initial_bias = 0
bias_initializer = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(bias_initializer, out_size)
def _initialize_params(self, in_size: int) -> None:
self.W.initialize((self.out_size, in_size)) # type: ignore
@classmethod
def from_params(cls, W, b=None, nobias=False):
"""Initialize a :class:`~chainer.links.Linear` with given parameters.
This method uses ``W`` and optional ``b`` to initialize a linear layer.
Args:
W (:class:`~chainer.Variable` or :ref:`ndarray`):
The weight parameter.
b (:class:`~chainer.Variable`, :ref:`ndarray`, or ``None``):
The bias parameter.
nobias (bool): If ``True``, the argument of ``b`` is ignored
in spite of whether it's given or not.
"""
out_size, in_size = W.shape
if b is not None:
if out_size != b.size:
raise ValueError('`out_size` does not match the size of `b`')
link = cls(
in_size, out_size, nobias,
initialW=variable.as_array(W), initial_bias=variable.as_array(b))
return link
@property
def printable_specs(self):
specs = [
('in_size', self.in_size),
('out_size', self.out_size),
('nobias', self.b is None),
]
for spec in specs:
yield spec
def forward(
self,
x: variable.Variable,
n_batch_axes: int = 1
) -> variable.Variable:
"""Applies the linear layer.
Args:
x (~chainer.Variable): Batch of input vectors.
n_batch_axes (int): The number of batch axes. The default is 1. The
input variable is reshaped into
(:math:`{\\rm n\\_batch\\_axes} + 1`)-dimensional tensor.
This should be greater than 0.
Returns:
~chainer.Variable: Output of the linear layer.
"""
if self.W.array is None:
in_size = utils.size_of_shape(x.shape[n_batch_axes:])
self._initialize_params(in_size)
return linear.linear(x, self.W, self.b, n_batch_axes=n_batch_axes)
# [end of file: 6,784 chars, avg line length 35.875, max line length 89, py]

# ===== chainer-master/chainer/links/connection/deconvolution_nd.py =====
from chainer.functions.connection import deconvolution_nd
from chainer import initializers
from chainer import link
from chainer.utils import conv_nd
from chainer import variable
class DeconvolutionND(link.Link):
"""N-dimensional deconvolution function.
This link wraps :func:`~chainer.functions.deconvolution_nd` function and
holds the filter weight and bias vector as its parameters.
    Deconvolution links can use a feature of cuDNN called autotuning, which
    selects the most efficient CNN algorithm for images of fixed size, and
    can provide a significant performance boost for fixed neural nets.
    To enable it, use ``chainer.using_config('autotune', True)``.
Args:
ndim (int): Number of spatial dimensions.
in_channels (int): Number of channels of input arrays.
If ``None``, parameter initialization will be deferred until the
first forward data pass at which time the size will be determined.
out_channels (int): Number of channels of output arrays.
ksize (int or tuple of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints): Stride of filter application.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent.
pad (int or tuple of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
nobias (bool): If ``True``, then this function does not use the bias.
outsize (tuple of ints): Expected output size of deconvolutional
operation. It should be a tuple of ints that represents the output
size of each dimension. Default value is ``None`` and the outsize
is estimated with input size, stride and pad.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be :math:`n+2` where :math:`n` is
the number of spatial dimensions.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
            zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
dilate (:class:`int` or :class:`tuple` of :class:`int` s):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d, ..., d)`` are equivalent.
groups (:class:`int`):
The number of groups to use grouped convolution.
The default is one, where grouped convolution is not used.
.. seealso::
:func:`~chainer.functions.deconvolution_nd`
Attributes:
W (~chainer.Variable): Weight parameter.
b (~chainer.Variable): Bias parameter. If ``initial_bias`` is ``None``,
set to ``None``.
.. admonition:: Example
There are several ways to make a DeconvolutionND link.
Let an input vector ``x`` be:
>>> x = np.arange(2 * 5 * 5 * 5, dtype=np.float32).reshape(
... 1, 2, 5, 5, 5)
1. Give the first four arguments explicitly:
>>> l = L.DeconvolutionND(3, 2, 7, 4)
>>> y = l(x)
>>> y.shape
(1, 7, 8, 8, 8)
2. Omit ``in_channels`` or fill it with ``None``:
The below two cases are the same.
>>> l = L.DeconvolutionND(3, 7, 4)
>>> y = l(x)
>>> y.shape
(1, 7, 8, 8, 8)
>>> l = L.DeconvolutionND(3, None, 7, 4)
>>> y = l(x)
>>> y.shape
(1, 7, 8, 8, 8)
When you omit the second argument, you need to specify the other
        subsequent arguments from ``stride`` as keyword arguments. So the
below two cases are the same.
>>> l = L.DeconvolutionND(3, 7, 4, stride=2, pad=1)
>>> y = l(x)
>>> y.shape
(1, 7, 10, 10, 10)
>>> l = L.DeconvolutionND(3, None, 7, 4, 2, 1)
>>> y = l(x)
>>> y.shape
(1, 7, 10, 10, 10)
"""
def __init__(self, ndim, in_channels, out_channels, ksize=None, stride=1,
pad=0, nobias=False, outsize=None, initialW=None,
initial_bias=None, dilate=1, groups=1):
super(DeconvolutionND, self).__init__()
if ksize is None:
out_channels, ksize, in_channels = \
in_channels, out_channels, None
self.out_channels = out_channels
self.ksize = conv_nd.as_tuple(ksize, ndim)
self.stride = stride
self.pad = pad
self.outsize = outsize
self.dilate = conv_nd.as_tuple(dilate, ndim)
self.groups = int(groups)
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer)
if in_channels is not None:
self._initialize_params(in_channels)
if nobias:
self.b = None
else:
if initial_bias is None:
initial_bias = 0
initial_bias = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(initial_bias, out_channels)
def _initialize_params(self, in_channels):
if self.out_channels % self.groups != 0:
raise ValueError('the number of output channels must be '
'divisible by the number of groups')
if in_channels % self.groups != 0:
raise ValueError('the number of input channels must be '
'divisible by the number of groups')
W_shape = (
in_channels, int(self.out_channels / self.groups)) + self.ksize
self.W.initialize(W_shape)
def forward(self, x):
if self.W.array is None:
self._initialize_params(x.shape[1])
return deconvolution_nd.deconvolution_nd(
x, self.W, b=self.b, stride=self.stride, pad=self.pad,
outsize=self.outsize, dilate=self.dilate, groups=self.groups)
class Deconvolution1D(DeconvolutionND):
"""1-dimensional deconvolution layer.
.. note::
This link wraps :class:`~chainer.links.DeconvolutionND` by giving 1 to
the first argument ``ndim``, so see the details of the behavior in
the documentation of :class:`~chainer.links.DeconvolutionND`.
"""
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
nobias=False, outsize=None, initialW=None, initial_bias=None,
dilate=1, groups=1):
super(Deconvolution1D, self).__init__(
1, in_channels, out_channels, ksize, stride, pad, nobias, outsize,
initialW, initial_bias, dilate, groups)
class Deconvolution3D(DeconvolutionND):
"""3-dimensional deconvolution layer.
.. note::
This link wraps :class:`~chainer.links.DeconvolutionND` by giving 3 to
the first argument ``ndim``, so see the details of the behavior in
the documentation of :class:`~chainer.links.DeconvolutionND`.
"""
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
nobias=False, outsize=None, initialW=None, initial_bias=None,
dilate=1, groups=1):
super(Deconvolution3D, self).__init__(
3, in_channels, out_channels, ksize, stride, pad, nobias, outsize,
initialW, initial_bias, dilate, groups)
# [end of file: 7,585 chars, avg line length 38.926316, max line length 79, py]

# ===== chainer-master/chainer/links/connection/parameter.py =====
from chainer.backends import cuda
from chainer.functions.math import identity
from chainer import link
class Parameter(link.Link):
"""Link that just holds a parameter and returns it.
.. deprecated:: v1.5
The parameters are stored as variables since v1.5. Use them directly
instead.
Args:
array: Initial parameter array.
Attributes:
W (~chainer.Variable): Parameter variable.
"""
def __init__(self, array):
super(Parameter, self).__init__()
self.add_param('W', array.shape, dtype=array.dtype)
self.W.array = array
if isinstance(array, cuda.ndarray):
self.to_gpu(cuda.get_device_from_array(array))
def forward(self, volatile='off'):
"""Returns the parameter variable.
Args:
volatile (~chainer.Flag): The volatility of the returned variable.
Returns:
~chainer.Variable: A copy of the parameter variable with given
volatility.
"""
# The first identity creates a copy of W, and the second identity cuts
# the edge if volatility is ON
W = identity.identity(self.W)
W.volatile = volatile
return identity.identity(W)
# [end of file: 1,231 chars, avg line length 26.377778, max line length 78, py]

# ===== chainer-master/chainer/links/connection/depthwise_convolution_2d.py =====
import numpy
from chainer.functions.connection import depthwise_convolution_2d
from chainer import initializers
from chainer import link
from chainer import variable
class DepthwiseConvolution2D(link.Link):
"""Two-dimensional depthwise convolutional layer.
This link wraps the :func:`~chainer.functions.depthwise_convolution_2d`
function and holds the filter weight and bias vector as parameters.
Args:
in_channels (int): Number of channels of input arrays. If ``None``,
parameter initialization will be deferred until the first forward
data pass at which time the size will be determined.
        channel_multiplier (int): Channel multiplier number. The number of
            output channels equals ``in_channels * channel_multiplier``.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
nobias (bool): If ``True``, then this link does not use the bias term.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 4.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
.. seealso::
See :func:`chainer.functions.depthwise_convolution_2d`.
Attributes:
W (~chainer.Variable): Weight parameter.
b (~chainer.Variable): Bias parameter.
"""
def __init__(self, in_channels, channel_multiplier, ksize, stride=1, pad=0,
nobias=False, initialW=None, initial_bias=None):
super(DepthwiseConvolution2D, self).__init__()
self.ksize = ksize
self.stride = _pair(stride)
self.pad = _pair(pad)
self.channel_multiplier = channel_multiplier
self.nobias = nobias
if initialW is None:
initialW = initializers.HeNormal(1. / numpy.sqrt(2))
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer)
if nobias:
self.b = None
else:
if initial_bias is None:
initial_bias = initializers.Constant(0)
bias_initializer = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(bias_initializer)
if in_channels is not None:
self._initialize_params(in_channels)
def _initialize_params(self, in_channels):
kh, kw = _pair(self.ksize)
W_shape = (self.channel_multiplier, in_channels, kh, kw)
self.W.initialize(W_shape)
if self.b is not None:
self.b.initialize(self.channel_multiplier * in_channels)
def forward(self, x):
"""Applies the depthwise convolution layer.
Args:
x (chainer.Variable or :class:`numpy.ndarray` or cupy.ndarray):
Input image.
Returns:
~chainer.Variable: Output of the depthwise convolution.
"""
if self.W.array is None:
self._initialize_params(x.shape[1])
return depthwise_convolution_2d.depthwise_convolution_2d(
x, self.W, self.b, self.stride, self.pad)
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
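# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). It checks the documented channel behavior: the
# depthwise layer emits in_channels * channel_multiplier output channels.
import numpy as np
import chainer.links as L
x = np.zeros((1, 3, 10, 10), dtype=np.float32)
l = L.DepthwiseConvolution2D(3, 2, 5) # channel_multiplier = 2
y = l(x)
assert y.shape == (1, 3 * 2, 6, 6) # 10 - 5 + 1 = 6 per spatial dimension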
| 3,761
| 36.62
| 79
|
py
|
chainer
|
chainer-master/chainer/links/connection/local_convolution_2d.py
|
from chainer.functions.connection import local_convolution_2d
from chainer import initializers
from chainer import link
from chainer import variable
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
def _conv_output_length(input_length, filter_size, stride):
    output_length = input_length - filter_size + 1
    # Account for the stride: the number of valid filter placements along
    # this dimension is ceil((input_length - filter_size + 1) / stride).
    # Without this step the precomputed weight shape disagrees with the
    # function output whenever stride > 1.
    output_length = (output_length + stride - 1) // stride
    return output_length
class LocalConvolution2D(link.Link):
"""Two-dimensional local convolutional layer.
This link wraps the :func:`~chainer.functions.local_convolution_2d`
function and holds the filter weight and bias array as parameters.
Args:
in_channels (int): Number of channels of input arrays. If either
in_channels or in_size is ``None``,
parameter initialization will be deferred until the first forward
data pass at which time the size will be determined.
out_channels (int): Number of channels of output arrays
        in_size (int or pair of ints): Size of each image channel.
            ``in_size=k`` and ``in_size=(k, k)`` are equivalent. If either
in_channels or in_size is ``None``, parameter initialization will
be deferred until the first forward data pass when the size will be
determined.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
nobias (bool): If ``True``, then this link does not use the bias term.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 6.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 3.
.. seealso::
See :func:`chainer.functions.local_convolution_2d`.
Attributes:
W (~chainer.Variable): Weight parameter.
b (~chainer.Variable): Bias parameter.
"""
def __init__(self, in_channels, out_channels, in_size=None, ksize=None,
stride=1, nobias=False, initialW=None, initial_bias=None,
**kwargs):
super(LocalConvolution2D, self).__init__()
self.ksize = ksize
self.stride = _pair(stride)
self.nobias = nobias
self.out_channels = out_channels
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer)
if nobias:
self.b = None
else:
if initial_bias is None:
initial_bias = 0
bias_initializer = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(bias_initializer)
if in_channels is not None and in_size is not None:
self._initialize_params(in_channels, _pair(in_size))
def _initialize_params(self, in_channels, in_size):
kh, kw = _pair(self.ksize)
ih, iw = _pair(in_size)
oh = _conv_output_length(ih, kh, self.stride[0])
ow = _conv_output_length(iw, kw, self.stride[1])
W_shape = (self.out_channels, oh, ow, in_channels, kh, kw)
bias_shape = (self.out_channels, oh, ow,)
self.W.initialize(W_shape)
if not self.nobias:
self.b.initialize(bias_shape)
def forward(self, x):
"""Applies the local convolution layer.
Args:
x (~chainer.Variable): Input image.
Returns:
~chainer.Variable: Output of the convolution.
"""
if self.W.array is None:
self._initialize_params(x.shape[1], x.shape[2:])
return local_convolution_2d.local_convolution_2d(
x, self.W, self.b, self.stride)
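# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). Unlike an ordinary convolution, the filters are
# untied per output location, so W holds one (in, kh, kw) filter for every
# (oh, ow) position.
import numpy as np
import chainer.links as L
x = np.zeros((1, 3, 8, 8), dtype=np.float32)
l = L.LocalConvolution2D(3, 4, in_size=8, ksize=3)
y = l(x)
assert y.shape == (1, 4, 6, 6) # 8 - 3 + 1 = 6 per dimension
assert l.W.shape == (4, 6, 6, 3, 3, 3) # (out, oh, ow, in, kh, kw)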
| 4,038
| 37.836538
| 79
|
py
|
chainer
|
chainer-master/chainer/links/connection/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/links/connection/deformable_convolution_2d.py
|
from chainer.functions import deformable_convolution_2d_sampler
from chainer import initializers
from chainer.initializers import constant
from chainer import link
from chainer.links.connection.convolution_2d import Convolution2D
from chainer import variable
class DeformableConvolution2D(link.Chain):
"""Two-dimensional deformable convolutional layer.
This link wraps the
convolution layer for offset prediction and
the :func:`~chainer.functions.deformable_convolution_2d_sampler`
function.
This also holds the filter weights and bias vectors of two
convolution layers as parameters.
Args:
in_channels (int): Number of channels of input arrays. If ``None``,
parameter initialization will be deferred until the first forward
data pass at which time the size will be determined.
out_channels (int): Number of channels of output arrays.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
offset_nobias (bool): If ``True``, then this link does not use the
bias term for the first convolution layer.
offset_initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight of the first convolution layer.
When it is :class:`numpy.ndarray`, its ``ndim`` should be 4.
offset_initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias of the first convolution layer.
If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
deform_nobias (bool): If ``True``, then this link does not use the
bias term for the second convolution layer.
deform_initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight for the second convolution layer.
When it is :class:`numpy.ndarray`,
its ``ndim`` should be 4.
deform_initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias for the second convolution layer.
If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
.. seealso::
See :func:`chainer.functions.deformable_convolution_2d_sampler`.
"""
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
offset_nobias=False, offset_initialW=None,
offset_initial_bias=None,
deform_nobias=False,
deform_initialW=None, deform_initial_bias=None):
super(DeformableConvolution2D, self).__init__()
kh, kw = _pair(ksize)
with self.init_scope():
self.offset_conv = Convolution2D(
in_channels, 2 * kh * kw, ksize, stride, pad,
offset_nobias, offset_initialW, offset_initial_bias)
self.deform_conv = DeformableConvolution2DSampler(
in_channels, out_channels, ksize, stride, pad,
deform_nobias, deform_initialW, deform_initial_bias)
def forward(self, x):
"""Applies the deformable convolution.
Args:
x (~chainer.Variable): Input image.
Returns:
~chainer.Variable: Output of the deformable convolution.
"""
offset = self.offset_conv(x)
return self.deform_conv(x, offset)
class DeformableConvolution2DSampler(link.Link):
"""Apply a two-dimensional deformable convolution layer using offsets"""
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
nobias=False, initialW=None, initial_bias=None):
super(DeformableConvolution2DSampler, self).__init__()
self.ksize = ksize
self.stride = _pair(stride)
self.pad = _pair(pad)
self.out_channels = out_channels
self.initialW = initialW
if initialW is None:
initialW = constant.Zero()
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer)
if nobias:
self.b = None
else:
if initial_bias is None:
initial_bias = initializers.Constant(0)
bias_initializer = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(bias_initializer)
if in_channels is not None:
self._initialize_params(in_channels)
def _initialize_params(self, in_channels):
kh, kw = _pair(self.ksize)
W_shape = (self.out_channels, in_channels, kh, kw)
self.W.initialize(W_shape)
if self.b is not None:
self.b.initialize(self.out_channels)
def forward(self, x, offset):
if self.W.array is None:
self._initialize_params(x.shape[1])
return deformable_convolution_2d_sampler(
x, offset, self.W, self.b, self.stride, self.pad)
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
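# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). The internal offset predictor emits 2 * kh * kw
# channels (an x/y displacement per filter tap), which the sampler consumes
# together with the input.
import numpy as np
import chainer.links as L
x = np.zeros((1, 3, 10, 10), dtype=np.float32)
l = L.DeformableConvolution2D(3, 8, 3, pad=1)
y = l(x)
assert y.shape == (1, 8, 10, 10) # pad=1 keeps the spatial size
assert l.offset_conv.W.shape[0] == 2 * 3 * 3 # offset channels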
| 5,452
| 39.69403
| 78
|
py
|
chainer
|
chainer-master/chainer/links/connection/bias.py
|
import chainer
from chainer.functions.math import bias
from chainer import link
from chainer import variable
class Bias(link.Link):
"""Broadcasted elementwise summation with learnable parameters.
    Computes an elementwise summation, as the :func:`~chainer.functions.bias`
    function does, except that its second input is a learnable bias parameter
    :math:`b` held by the link.
Args:
axis (int): The first axis of the first input of
:func:`~chainer.functions.bias` function along which its second
input is applied.
shape (tuple of ints): Shape of the learnable bias parameter. If
``None``, this link does not have learnable parameters so an
explicit bias needs to be given to its ``forward`` method's second
input.
.. seealso:: See :func:`~chainer.functions.bias` for details.
Attributes:
b (~chainer.Variable): Bias parameter if ``shape`` is given. Otherwise,
no attributes.
"""
def __init__(self, axis=1, shape=None):
super(Bias, self).__init__()
# Add b parameter if given.
if shape is not None:
with self.init_scope():
self.b = variable.Parameter(0, shape)
self.axis = axis
def forward(self, *xs):
"""Applies broadcasted elementwise summation.
Args:
            xs (list of Variables): Input variables. The length should be
                one if the link has a learnable bias parameter, and two
                otherwise.
"""
axis = self.axis
# Case of only one argument where b is a learnt parameter.
if hasattr(self, 'b'):
if chainer.is_debug():
assert len(xs) == 1
x, = xs
b = self.b
return bias.bias(x, b, axis)
# Case of two arguments where b is given as an argument.
else:
if chainer.is_debug():
assert len(xs) == 2
x, y = xs
return bias.bias(x, y, axis)
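# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). It shows both modes documented above: a learnable
# bias held by the link, and a parameter-free link that takes the bias as
# its second input.
import numpy as np
import chainer.links as L
x = np.zeros((2, 3, 4), dtype=np.float32)
l = L.Bias(axis=1, shape=(3,)) # learnable b of shape (3,)
assert l(x).shape == (2, 3, 4)
l2 = L.Bias(axis=1) # no parameter; b is the second input
assert l2(x, np.ones(3, dtype=np.float32)).shape == (2, 3, 4)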
| 2,045
| 30.96875
| 79
|
py
|
chainer
|
chainer-master/chainer/links/connection/convolution_2d.py
|
import chainer
from chainer.functions.connection import convolution_2d
from chainer import initializers
from chainer import link
from chainer import memory_layouts
from chainer.utils import argument
from chainer import variable
class Convolution2D(link.Link):
"""__init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0, \
nobias=False, initialW=None, initial_bias=None, *, dilate=1, groups=1)
Two-dimensional convolutional layer.
This link wraps the :func:`~chainer.functions.convolution_2d` function and
holds the filter weight and bias vector as parameters.
    The output of this function can be non-deterministic when it uses cuDNN.
    If ``chainer.configuration.config.cudnn_deterministic`` is ``True`` and
    cuDNN version is >= v3, it forces cuDNN to use a deterministic algorithm.
    Convolution links can use a feature of cuDNN called autotuning, which
    selects the most efficient CNN algorithm for fixed-size images, and can
    provide a significant performance boost for fixed neural nets.
    To enable autotuning, use ``chainer.using_config('autotune', True)``.
Args:
in_channels (int or None): Number of channels of input arrays.
If ``None``, parameter initialization will be deferred until the
first forward data pass at which time the size will be determined.
out_channels (int): Number of channels of output arrays.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
nobias (bool): If ``True``, then this link does not use the bias term.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 4.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
dilate (int or pair of ints):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
groups (:class:`int`): Number of groups of channels. If the number
is greater than 1, input tensor :math:`W` is divided into some
blocks by this value channel-wise. For each tensor blocks,
convolution operation will be executed independently. Input channel
size ``in_channels`` and output channel size ``out_channels`` must
be exactly divisible by this value.
.. seealso::
See :func:`chainer.functions.convolution_2d` for the definition of
two-dimensional convolution.
Attributes:
W (~chainer.Variable): Weight parameter.
b (~chainer.Variable): Bias parameter.
.. admonition:: Example
There are several ways to make a Convolution2D link.
Let an input vector ``x`` be:
>>> x = np.arange(1 * 3 * 10 * 10, dtype=np.float32).reshape(
... 1, 3, 10, 10)
1. Give the first three arguments explicitly:
>>> l = L.Convolution2D(3, 7, 5)
>>> y = l(x)
>>> y.shape
(1, 7, 6, 6)
2. Omit ``in_channels`` or fill it with ``None``:
The below two cases are the same.
>>> l = L.Convolution2D(7, 5)
>>> y = l(x)
>>> y.shape
(1, 7, 6, 6)
>>> l = L.Convolution2D(None, 7, 5)
>>> y = l(x)
>>> y.shape
(1, 7, 6, 6)
When you omit the first argument, you need to specify the other
        subsequent arguments from ``stride`` as keyword arguments. So the
below two cases are the same.
>>> l = L.Convolution2D(7, 5, stride=1, pad=0)
>>> y = l(x)
>>> y.shape
(1, 7, 6, 6)
>>> l = L.Convolution2D(None, 7, 5, 1, 0)
>>> y = l(x)
>>> y.shape
(1, 7, 6, 6)
"""
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
nobias=False, initialW=None, initial_bias=None, **kwargs):
super(Convolution2D, self).__init__()
dilate, groups = argument.parse_kwargs(
kwargs, ('dilate', 1), ('groups', 1),
deterministic='deterministic argument is not supported anymore. '
'Use chainer.using_config(\'cudnn_deterministic\', value) '
'context where value is either `True` or `False`.')
if ksize is None:
out_channels, ksize, in_channels = in_channels, out_channels, None
self.cudnn_fast = chainer.get_compute_mode() == 'cudnn_fast'
if self.cudnn_fast:
x_layout = memory_layouts.CUDNN_CHANNEL_LAST_X
w_layout = memory_layouts.CUDNN_CHANNEL_LAST_W
else:
x_layout = memory_layouts.CUDNN_CHANNEL_FIRST_X
w_layout = memory_layouts.CUDNN_CHANNEL_FIRST_W
self.ksize = ksize
self.stride = _pair(stride)
self.pad = _pair(pad)
self.dilate = _pair(dilate)
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = int(groups)
self.x_layout = x_layout
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer, layout=w_layout)
if in_channels is not None:
self._initialize_params(in_channels)
if nobias:
self.b = None
else:
if initial_bias is None:
initial_bias = 0
bias_initializer = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(bias_initializer, out_channels)
@property
def printable_specs(self):
specs = [
('in_channels', self.in_channels),
('out_channels', self.out_channels),
('ksize', self.ksize),
('stride', self.stride),
('pad', self.pad),
('nobias', self.b is None),
('dilate', self.dilate),
('groups', self.groups),
]
for spec in specs:
yield spec
def _initialize_params(self, in_channels):
kh, kw = _pair(self.ksize)
if self.out_channels % self.groups != 0:
raise ValueError('the number of output channels must be'
' divisible by the number of groups')
if in_channels % self.groups != 0:
raise ValueError('the number of input channels must be'
' divisible by the number of groups')
W_shape = (self.out_channels, int(in_channels / self.groups), kh, kw)
self.W.initialize(W_shape)
@classmethod
def from_params(cls, W, b=None, stride=1, pad=0, nobias=False, **kwargs):
"""from_params(cls, W, b=None, stride=1, pad=0, \
nobias=False, *, dilate=1, groups=1)
Initialize a :class:`~chainer.links.Convolution2D` with given
parameters.
This method uses ``W`` and optional ``b`` to initialize
a 2D convolution layer.
Args:
W (:class:`~chainer.Variable` or :ref:`ndarray`):
The weight parameter.
b (:class:`~chainer.Variable`, :ref:`ndarray`, or ``None``):
The bias parameter.
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
nobias (bool): If ``True``, then this link does not use
the bias term in spite of whether ``b`` is given or not.
dilate (int or pair of ints):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
groups (:class:`int`): Number of groups of channels. If the number
is greater than 1, input tensor :math:`W` is divided into some
blocks by this value channel-wise. For each tensor blocks,
convolution operation will be executed independently.
Input channel size ``in_channels`` and output channel size
``out_channels`` must be exactly divisible by this value.
"""
# TODO(crcrpar): Support the below conditions.
# - W (and b) of cupy on non-default GPUs like id=1.
# - W (and b) of chainerx on cuda.
dilate, groups = argument.parse_kwargs(
kwargs, ('dilate', 1), ('groups', 1))
        out_channels, _in_channels, kh, kw = W.shape
in_channels = _in_channels * groups
if b is not None:
if out_channels != b.size:
raise ValueError(
'`out_channels` does not match the size of `b`')
link = cls(
            in_channels, out_channels, (kh, kw), stride, pad, nobias,
initialW=variable.as_array(W), initial_bias=variable.as_array(b),
dilate=dilate, groups=groups)
return link
def forward(self, x):
"""Applies the convolution layer.
Args:
x (~chainer.Variable): Input image.
Returns:
~chainer.Variable: Output of the convolution.
"""
x = chainer.as_variable(x)
assert x.layout == self.x_layout
# self.W can be a Variable instead of Parameter: #8462
# TODO(niboshi): Use Parameter.is_initialized.
if self.W.raw_array is None:
_, c, _, _ = memory_layouts.get_semantic_shape(
x, assumed_layout=self.x_layout)
self._initialize_params(c)
return convolution_2d.convolution_2d(
x, self.W, self.b, self.stride, self.pad, dilate=self.dilate,
groups=self.groups, cudnn_fast=self.cudnn_fast)
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
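# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). It demonstrates from_params as documented above:
# the link is rebuilt from an existing weight array of shape
# (out, in, kh, kw) and an optional bias.
import numpy as np
import chainer.links as L
W = np.random.randn(7, 3, 5, 5).astype(np.float32)
b = np.zeros(7, dtype=np.float32)
l = L.Convolution2D.from_params(W, b, stride=1, pad=2)
x = np.zeros((1, 3, 10, 10), dtype=np.float32)
assert l(x).shape == (1, 7, 10, 10) # 10 + 2*2 - 5 + 1 = 10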
| 10,408
| 39.344961
| 79
|
py
|
chainer
|
chainer-master/chainer/links/connection/mlp_convolution_2d.py
|
from chainer.functions.activation import relu
from chainer import link
from chainer.links.connection import convolution_2d
from chainer.utils import argument
class MLPConvolution2D(link.ChainList):
"""__init__(self, in_channels, out_channels, ksize=None, stride=1, \
pad=0, activation=relu.relu, conv_init=None, bias_init=None)
Two-dimensional MLP convolution layer of Network in Network.
This is an "mlpconv" layer from the Network in Network paper. This layer
is a two-dimensional convolution layer followed by 1x1 convolution layers
and interleaved activation functions.
Note that it does not apply the activation function to the output of the
last 1x1 convolution layer.
Args:
in_channels (int or None): Number of channels of input arrays.
If it is ``None`` or omitted, parameter initialization will be
deferred until the first forward data pass at which time the size
will be determined.
        out_channels (tuple of ints): Tuple of numbers of channels. The i-th
integer indicates the number of filters of the i-th convolution.
ksize (int or pair of ints): Size of filters (a.k.a. kernels) of the
first convolution layer. ``ksize=k`` and ``ksize=(k, k)`` are
equivalent.
stride (int or pair of ints): Stride of filter applications at the
first convolution layer. ``stride=s`` and ``stride=(s, s)`` are
equivalent.
pad (int or pair of ints): Spatial padding width for input arrays at
the first convolution layer. ``pad=p`` and ``pad=(p, p)`` are
equivalent.
activation (callable):
Activation function for internal hidden units.
You can specify one of activation functions from
:doc:`built-in activation functions </reference/functions>` or
your own function.
It should not be an
:doc:`activation functions with parameters </reference/links>`
(i.e., :class:`~chainer.Link` instance).
The function must accept one argument (the output from each child
link), and return a value.
Returned value must be a Variable derived from the input Variable
to perform backpropagation on the variable.
Note that this function is not applied to the output of this link.
conv_init: An initializer of weight matrices
passed to the convolution layers. This option must be specified as
a keyword argument.
bias_init: An initializer of bias vectors
passed to the convolution layers. This option must be specified as
a keyword argument.
See: `Network in Network <https://arxiv.org/abs/1312.4400v3>`_.
Attributes:
activation (callable):
Activation function.
See the description in the arguments for details.
"""
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
activation=relu.relu, *args, **kwargs):
# If `args` is not empty, users assume the API for v1 and
        # specify `wscale` as a positional argument, which we want
# to detect and forbid with an explicit error message.
msg = ('wscale is not supported anymore. '
'Use conv_init and bias_init argument to change '
'the scale of initial parameters.')
if args:
raise TypeError(msg)
argument.check_unexpected_kwargs(kwargs, wscale=msg)
conv_init, bias_init = argument.parse_kwargs(
kwargs, ('conv_init', None), ('bias_init', None))
if ksize is None:
out_channels, ksize, in_channels = in_channels, out_channels, None
assert len(out_channels) > 0
convs = [convolution_2d.Convolution2D(
in_channels, out_channels[0], ksize, stride, pad,
initialW=conv_init, initial_bias=bias_init)]
for n_in, n_out in zip(out_channels, out_channels[1:]):
convs.append(convolution_2d.Convolution2D(
n_in, n_out, 1, initialW=conv_init,
initial_bias=bias_init))
super(MLPConvolution2D, self).__init__(*convs)
self.activation = activation
def forward(self, x):
"""Computes the output of the mlpconv layer.
Args:
x (~chainer.Variable): Input image.
Returns:
~chainer.Variable: Output of the mlpconv layer.
"""
f = self.activation
for l in self[:-1]:
x = f(l(x))
return self[-1](x)
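# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). One child link is created per entry of
# out_channels: the first applies the given ksize, the rest are 1x1
# convolutions, as documented above.
import numpy as np
import chainer.links as L
x = np.zeros((1, 3, 10, 10), dtype=np.float32)
l = L.MLPConvolution2D(3, (16, 8, 4), 5) # 3 -> 16 (5x5) -> 8 -> 4 (1x1)
y = l(x)
assert y.shape == (1, 4, 6, 6)
assert len(l) == 3 # one conv per out_channels entry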
| 4,629
| 41.477064
| 78
|
py
|
chainer
|
chainer-master/chainer/links/connection/embed_id.py
|
from chainer.functions.connection import embed_id
from chainer.initializers import normal
from chainer import link
from chainer import variable
class EmbedID(link.Link):
"""Efficient linear layer for one-hot input.
This is a link that wraps the :func:`~chainer.functions.embed_id` function.
This link holds the ID (word) embedding matrix ``W`` as a parameter.
Args:
in_size (int): Number of different identifiers (a.k.a. vocabulary
size).
out_size (int): Size of embedding vector.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 2.
        ignore_label (int or None): If ``ignore_label`` is an int value, the
            ``i``-th row of the return value is filled with ``0``.
.. seealso:: :func:`~chainer.functions.embed_id`
Attributes:
W (~chainer.Variable): Embedding parameter matrix.
.. admonition:: Example
>>> W = np.array([[0, 0, 0],
... [1, 1, 1],
... [2, 2, 2]]).astype(np.float32)
>>> W
array([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.]], dtype=float32)
>>> l = L.EmbedID(W.shape[0], W.shape[1], initialW=W)
>>> x = np.array([2, 1]).astype(np.int32)
>>> x
array([2, 1], dtype=int32)
>>> y = l(x)
>>> y.array
array([[2., 2., 2.],
[1., 1., 1.]], dtype=float32)
"""
ignore_label = None
def __init__(self, in_size, out_size, initialW=None, ignore_label=None):
super(EmbedID, self).__init__()
self.ignore_label = ignore_label
with self.init_scope():
if initialW is None:
initialW = normal.Normal(1.0)
self.W = variable.Parameter(initialW, (in_size, out_size))
@classmethod
def from_params(cls, W, ignore_label=None):
"""Initialize `~chainer.links.EmbedID` with the given parameter.
Args:
W (:class:`~chainer.Variable` or :ref:`ndarray`):
The weight parameter.
            ignore_label (int or None): If ``ignore_label`` is an int value,
                the ``i``-th row of the return value is filled with ``0``.
"""
in_size, out_size = W.shape
link = cls(
in_size, out_size,
initialW=variable.as_array(W),
ignore_label=ignore_label
)
return link
def forward(self, x):
"""Extracts the word embedding of given IDs.
Args:
x (~chainer.Variable): Batch vectors of IDs.
Returns:
~chainer.Variable: Batch of corresponding embeddings.
"""
return embed_id.embed_id(x, self.W, ignore_label=self.ignore_label)
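# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). It demonstrates the ignore_label behavior
# documented above: rows whose input ID equals ignore_label come out as
# zeros.
import numpy as np
import chainer.links as L
l = L.EmbedID(5, 3, ignore_label=-1)
x = np.array([2, -1], dtype=np.int32)
y = l(x)
assert y.shape == (2, 3)
assert (y.array[1] == 0).all() # the ignored ID maps to a zero row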
| 2,838
| 30.898876
| 79
|
py
|
chainer
|
chainer-master/chainer/links/connection/dilated_convolution_2d.py
|
from chainer.functions.connection import dilated_convolution_2d
from chainer import initializers
from chainer import link
from chainer import variable
class DilatedConvolution2D(link.Link):
"""Two-dimensional dilated convolutional layer.
This link wraps the :func:`~chainer.functions.dilated_convolution_2d`
function and holds the filter weight and bias vector as parameters.
.. note::
You can also define a dilated convolutional layer by passing ``dilate``
argument to :class:`chainer.links.Convolution2D`.
The functionality is the same.
Args:
in_channels (int or None): Number of channels of input arrays.
If ``None``, parameter initialization will be deferred until the
first forward data pass at which time the size will be determined.
out_channels (int): Number of channels of output arrays.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
nobias (bool): If ``True``, then this link does not use the bias term.
initialW (:ref:`initializer <initializer>`): Initializer to
initialize the weight. When it is :class:`numpy.ndarray`,
its ``ndim`` should be 4.
initial_bias (:ref:`initializer <initializer>`): Initializer to
initialize the bias. If ``None``, the bias will be initialized to
zero. When it is :class:`numpy.ndarray`, its ``ndim`` should be 1.
.. seealso::
See :func:`chainer.functions.dilated_convolution_2d`
for the definition of two-dimensional dilated convolution.
Attributes:
W (~chainer.Variable): Weight parameter.
b (~chainer.Variable): Bias parameter.
.. admonition:: Example
There are several ways to make a DilatedConvolution2D link.
Let an input vector ``x`` be:
>>> x = np.arange(1 * 3 * 10 * 10, dtype=np.float32).\
reshape(1, 3, 10, 10)
1. Give the first three arguments explicitly:
>>> l = L.DilatedConvolution2D(3, 7, 5)
>>> y = l(x)
>>> y.shape
(1, 7, 6, 6)
2. Omit ``in_channels`` or fill it with ``None``:
The below two cases are the same.
>>> l = L.DilatedConvolution2D(7, 5)
>>> y = l(x)
>>> y.shape
(1, 7, 6, 6)
>>> l = L.DilatedConvolution2D(None, 7, 5)
>>> y = l(x)
>>> y.shape
(1, 7, 6, 6)
When you omit the first argument, you need to specify the other
        subsequent arguments from ``stride`` as keyword arguments. So the
below two cases are the same.
>>> l = L.DilatedConvolution2D(None, 7, 5, 1, 0, 2)
>>> y = l(x)
>>> y.shape
(1, 7, 2, 2)
>>> l = L.DilatedConvolution2D(7, 5, stride=1, pad=0, dilate=2)
>>> y = l(x)
>>> y.shape
(1, 7, 2, 2)
"""
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
dilate=1, nobias=False, initialW=None, initial_bias=None):
super(DilatedConvolution2D, self).__init__()
if ksize is None:
out_channels, ksize, in_channels = in_channels, out_channels, None
self.ksize = ksize
self.stride = _pair(stride)
self.pad = _pair(pad)
self.dilate = _pair(dilate)
self.out_channels = out_channels
with self.init_scope():
W_initializer = initializers._get_initializer(initialW)
self.W = variable.Parameter(W_initializer)
if in_channels is not None:
self._initialize_params(in_channels)
if nobias:
self.b = None
else:
if initial_bias is None:
initial_bias = 0
initial_bias = initializers._get_initializer(initial_bias)
self.b = variable.Parameter(initial_bias, out_channels)
def _initialize_params(self, in_channels):
kh, kw = _pair(self.ksize)
W_shape = (self.out_channels, in_channels, kh, kw)
self.W.initialize(W_shape)
def forward(self, x):
"""Applies the convolution layer.
Args:
x (~chainer.Variable): Input image.
Returns:
~chainer.Variable: Output of the convolution.
"""
if self.W.array is None:
self._initialize_params(x.shape[1])
return dilated_convolution_2d.dilated_convolution_2d(
x, self.W, self.b, self.stride, self.pad, self.dilate)
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
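# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). The note above says passing ``dilate`` to
# Convolution2D is equivalent; both links produce the same output shape on
# the same input.
import numpy as np
import chainer.links as L
x = np.zeros((1, 3, 10, 10), dtype=np.float32)
a = L.DilatedConvolution2D(3, 7, 5, dilate=2)
b = L.Convolution2D(3, 7, 5, dilate=2)
assert a(x).shape == b(x).shape == (1, 7, 2, 2)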
| 5,146
| 34.253425
| 78
|
py
|
chainer
|
chainer-master/chainer/links/loss/negative_sampling.py
|
import numpy
import chainer
from chainer.functions.loss import negative_sampling
from chainer import link
from chainer.utils import argument
from chainer.utils import walker_alias
from chainer import variable
class NegativeSampling(link.Link):
"""Negative sampling loss layer.
This link wraps the :func:`~chainer.functions.negative_sampling` function.
It holds the weight matrix as a parameter. It also builds a sampler
internally given a list of word counts.
Args:
in_size (int): Dimension of input vectors.
        counts (int list): Number of occurrences of each identifier.
sample_size (int): Number of negative samples.
power (float): Power factor :math:`\\alpha`.
dtype (numpy.dtype): Type to use in computing.
.. seealso:: :func:`~chainer.functions.negative_sampling` for more detail.
Attributes:
W (~chainer.Variable): Weight parameter matrix.
"""
def __init__(self, in_size, counts, sample_size, power=0.75, dtype=None):
super(NegativeSampling, self).__init__()
dtype = chainer.get_dtype(dtype)
vocab_size = len(counts)
self.sample_size = sample_size
power = dtype.type(power)
p = numpy.array(counts, dtype)
numpy.power(p, power, p)
self.sampler = walker_alias.WalkerAlias(p)
with self.init_scope():
self.W = variable.Parameter(0, (vocab_size, in_size))
def device_resident_accept(self, visitor):
super(NegativeSampling, self).device_resident_accept(visitor)
self.sampler.device_resident_accept(visitor)
def forward(self, x, t, reduce='sum', **kwargs):
"""forward(x, t, reduce='sum', *, return_samples=False)
Computes the loss value for given input and ground truth labels.
Args:
x (~chainer.Variable): Input of the weight matrix multiplication.
t (~chainer.Variable): Batch of ground truth labels.
reduce (str): Reduction option. Its value must be either
``'sum'`` or ``'no'``. Otherwise, :class:`ValueError` is
raised.
return_samples (bool):
If ``True``, the sample array is also returned.
The sample array is a
:math:`(\\text{batch_size}, \\text{sample_size} + 1)`-array of
integers whose first column is fixed to the ground truth labels
and the other columns are drawn from the
:class:`chainer.utils.WalkerAlias` sampler.
Returns:
~chainer.Variable or tuple:
If ``return_samples`` is ``False`` (default), loss value is
returned.
Otherwise, a tuple of the loss value and the sample array
is returned.
"""
return_samples = False
if kwargs:
return_samples, = argument.parse_kwargs(
kwargs, ('return_samples', return_samples))
ret = negative_sampling.negative_sampling(
x, t, self.W, self.sampler.sample, self.sample_size,
reduce=reduce, return_samples=return_samples)
return ret
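# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). The sampler is built from the word counts; forward
# draws sample_size negative samples per example and returns a scalar under
# the default 'sum' reduction.
import numpy as np
import chainer.links as L
counts = [10, 5, 3, 2] # frequency of each word ID
l = L.NegativeSampling(4, counts, sample_size=2)
x = np.zeros((3, 4), dtype=np.float32) # (batch, in_size)
t = np.array([0, 1, 2], dtype=np.int32) # ground-truth word IDs
assert l(x, t).shape == () # scalar loss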
| 3,154
| 35.264368
| 79
|
py
|
chainer
|
chainer-master/chainer/links/loss/black_out.py
|
import numpy
from chainer.functions.loss import black_out
from chainer import link
from chainer.utils import walker_alias
from chainer import variable
class BlackOut(link.Link):
"""BlackOut loss layer.
.. seealso:: :func:`~chainer.functions.black_out` for more detail.
Args:
in_size (int): Dimension of input vectors.
        counts (int list): Number of occurrences of each identifier.
sample_size (int): Number of negative samples.
Attributes:
W (~chainer.Parameter): Weight parameter matrix.
"""
sample_data = None
def __init__(self, in_size, counts, sample_size):
super(BlackOut, self).__init__()
vocab_size = len(counts)
p = numpy.array(counts, dtype=numpy.float32)
self.sampler = walker_alias.WalkerAlias(p)
self.sample_size = sample_size
with self.init_scope():
self.W = variable.Parameter(shape=(vocab_size, in_size))
def device_resident_accept(self, visitor):
super(BlackOut, self).device_resident_accept(visitor)
self.sampler.device_resident_accept(visitor)
def forward(self, x, t):
"""Computes the loss value for given input and ground truth labels.
Args:
x (~chainer.Variable): Input of the weight matrix multiplication.
t (~chainer.Variable): Batch of ground truth labels.
Returns:
~chainer.Variable: Loss value.
"""
batch_size = x.shape[0]
if self.sample_data is not None:
# for test
sample_data = self.sample_data
else:
shape = (batch_size, self.sample_size)
sample_data = self.sampler.sample(shape)
samples = variable.Variable(sample_data, requires_grad=False)
return black_out.black_out(x, t, self.W, samples)
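# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). Note that W above is created without an
# initializer, so it must be filled (normally by training or a serializer)
# before the loss is meaningful; here it is filled by hand for the demo.
import numpy as np
import chainer.links as L
l = L.BlackOut(4, [10, 5, 3, 2], sample_size=2)
l.W.array[...] = 0.01 # placeholder values for the demo
x = np.random.randn(3, 4).astype(np.float32)
t = np.array([0, 1, 2], dtype=np.int32)
loss = l(x, t) # scalar loss variable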
| 1,822
| 28.403226
| 77
|
py
|
chainer
|
chainer-master/chainer/links/loss/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/links/loss/hierarchical_softmax.py
|
import copy
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import device_resident
from chainer import function
from chainer.initializers import uniform
from chainer import link
from chainer import utils
from chainer.utils import type_check
from chainer import variable
class TreeParser(object):
def __init__(self, dtype):
self.next_id = 0
self.dtype = dtype
def size(self):
return self.next_id
def get_paths(self):
return self.paths
def get_codes(self):
return self.codes
def parse(self, tree):
self.next_id = 0
self.path = []
self.code = []
self.paths = {}
self.codes = {}
self._parse(tree)
assert(len(self.path) == 0)
assert(len(self.code) == 0)
assert(len(self.paths) == len(self.codes))
def _parse(self, node):
if isinstance(node, tuple):
# internal node
if len(node) != 2:
raise ValueError(
'All internal nodes must have two child nodes')
left, right = node
self.path.append(self.next_id)
self.next_id += 1
self.code.append(1.0)
self._parse(left)
self.code[-1] = -1.0
self._parse(right)
self.path.pop()
self.code.pop()
else:
# leaf node
self.paths[node] = numpy.array(self.path, dtype=numpy.int32)
self.codes[node] = numpy.array(self.code, dtype=self.dtype)
class BinaryHierarchicalSoftmaxFunction(
device_resident.DeviceResident, function.Function):
"""Hierarchical softmax function based on a binary tree.
This function object should be allocated beforehand, and be copied on every
forward computation, since the initializer parses the given tree. See the
implementation of :class:`BinaryHierarchicalSoftmax` for details.
Args:
tree: A binary tree made with tuples like ``((1, 2), 3)``.
.. seealso::
See :class:`BinaryHierarchicalSoftmax` for details.
"""
def __init__(self, tree, dtype):
device_resident.DeviceResident.__init__(self)
parser = TreeParser(dtype)
parser.parse(tree)
paths = parser.get_paths()
codes = parser.get_codes()
n_vocab = max(paths.keys()) + 1
self.paths = numpy.concatenate(
[paths[i] for i in range(n_vocab) if i in paths])
self.codes = numpy.concatenate(
[codes[i] for i in range(n_vocab) if i in codes])
begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)
begins[0] = 0
for i in range(0, n_vocab):
length = len(paths[i]) if i in paths else 0
begins[i + 1] = begins[i] + length
self.begins = begins
self.parser_size = parser.size()
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, t_type, w_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0],
w_type.dtype == x_type.dtype,
w_type.ndim == 2,
w_type.shape[0] == self.parser_size,
w_type.shape[1] == x_type.shape[1],
)
def device_resident_accept(self, visitor):
super(BinaryHierarchicalSoftmaxFunction, self).device_resident_accept(
visitor)
self.paths = visitor.visit_array(self.paths)
self.codes = visitor.visit_array(self.codes)
self.begins = visitor.visit_array(self.begins)
def forward_cpu(self, inputs):
x, t, W = inputs
loss = x.dtype.type(0.0)
for ix, it in six.moves.zip(x, t):
loss += self._forward_cpu_one(ix, it, W)
return numpy.array(loss),
def _forward_cpu_one(self, x, t, W):
begin = self.begins[t]
end = self.begins[t + 1]
w = W[self.paths[begin:end]]
wxy = w.dot(x) * self.codes[begin:end]
loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))
return numpy.sum(loss)
def backward_cpu(self, inputs, grad_outputs):
x, t, W = inputs
gloss, = grad_outputs
gx = numpy.empty_like(x)
gW = numpy.zeros_like(W)
for i, (ix, it) in enumerate(six.moves.zip(x, t)):
gx[i] = self._backward_cpu_one(ix, it, W, gloss, gW)
return gx, None, gW
def _backward_cpu_one(self, x, t, W, gloss, gW):
begin = self.begins[t]
end = self.begins[t + 1]
path = self.paths[begin:end]
w = W[path]
wxy = w.dot(x) * self.codes[begin:end]
g = -gloss * self.codes[begin:end] / (1.0 + numpy.exp(wxy))
gx = g.dot(w)
gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))
gW[path] += gw
return gx
def forward_gpu(self, inputs):
x, t, W = inputs
max_length = cuda.reduce(
'T t, raw T begins', 'T out', 'begins[t + 1] - begins[t]',
'max(a, b)', 'out = a', '0',
'binary_hierarchical_softmax_max_length')(t, self.begins)
max_length = cuda.to_cpu(max_length)[()]
length = max_length * x.shape[0]
ls = cuda.cupy.empty((length,), dtype=x.dtype)
n_in = x.shape[1]
wxy = cuda.cupy.empty_like(ls)
cuda.elementwise(
'''raw T x, raw T w, raw int32 ts, raw int32 paths,
raw T codes, raw int32 begins, int32 c, int32 max_length''',
'T ls, T wxy',
'''
int ind = i / max_length;
int offset = i - ind * max_length;
int t = ts[ind];
int begin = begins[t];
int length = begins[t + 1] - begins[t];
if (offset < length) {
int p = begin + offset;
int node = paths[p];
T wx = 0;
for (int j = 0; j < c; ++j) {
int w_ind[] = {node, j};
int x_ind[] = {ind, j};
wx += w[w_ind] * x[x_ind];
}
wxy = wx * codes[p];
ls = log(1 + exp(-wxy));
} else {
ls = 0;
}
''',
'binary_hierarchical_softmax_forward'
)(x, W, t, self.paths, self.codes, self.begins, n_in, max_length, ls,
wxy)
self.max_length = max_length
self.wxy = wxy
return ls.sum(),
def backward_gpu(self, inputs, grad_outputs):
utils.nondeterministic('atomicAdd')
x, t, W = inputs
gloss, = grad_outputs
n_in = x.shape[1]
gx = cuda.cupy.zeros_like(x)
gW = cuda.cupy.zeros_like(W)
cuda.elementwise(
'''T wxy, raw T x, raw T w, raw int32 ts, raw int32 paths,
raw T codes, raw int32 begins, raw T gloss,
int32 c, int32 max_length''',
'raw T gx, raw T gw',
'''
int ind = i / max_length;
int offset = i - ind * max_length;
int t = ts[ind];
int begin = begins[t];
int length = begins[t + 1] - begins[t];
if (offset < length) {
int p = begin + offset;
int node = paths[p];
T code = codes[p];
T g = -gloss[0] * code / (1.0 + exp(wxy));
for (int j = 0; j < c; ++j) {
int w_ind[] = {node, j};
int x_ind[] = {ind, j};
atomicAdd(&gx[x_ind], g * w[w_ind]);
atomicAdd(&gw[w_ind], g * x[x_ind]);
}
}
''',
'binary_hierarchical_softmax_bwd'
)(self.wxy, x, W, t, self.paths, self.codes, self.begins, gloss, n_in,
self.max_length, gx, gW)
return gx, None, gW
class BinaryHierarchicalSoftmax(link.Link):
"""Hierarchical softmax layer over binary tree.
In natural language applications, vocabulary size is too large to use
softmax loss.
Instead, the hierarchical softmax uses product of sigmoid functions.
It costs only :math:`O(\\log(n))` time where :math:`n` is the vocabulary
size in average.
    At first a user needs to prepare a binary tree in which each leaf
    corresponds to a word in the vocabulary.
When a word :math:`x` is given, exactly one path from the root of the tree
to the leaf of the word exists.
Let :math:`\\mbox{path}(x) = ((e_1, b_1), \\dots, (e_m, b_m))` be the path
of :math:`x`, where :math:`e_i` is an index of :math:`i`-th internal node,
and :math:`b_i \\in \\{-1, 1\\}` indicates direction to move at
:math:`i`-th internal node (-1 is left, and 1 is right).
Then, the probability of :math:`x` is given as below:
.. math::
P(x) &= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}P(b_i | e_i) \\\\
&= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}\\sigma(b_i x^\\top
w_{e_i}),
where :math:`\\sigma(\\cdot)` is a sigmoid function, and :math:`w` is a
weight matrix.
    This function costs :math:`O(\\log(n))` time as the average length of
    paths is :math:`O(\\log(n))`, and :math:`O(n)` memory as the number of
    internal nodes equals :math:`n - 1`.
Args:
in_size (int): Dimension of input vectors.
        tree: A binary tree made with tuples like ``((1, 2), 3)``.
dtype (numpy.dtype): Type to use in computing.
Attributes:
W (~chainer.Variable): Weight parameter matrix.
See: Hierarchical Probabilistic Neural Network Language Model [Morin+,
AISTAT2005].
"""
def __init__(self, in_size, tree, dtype=None):
# This function object is copied on every forward computation.
super(BinaryHierarchicalSoftmax, self).__init__()
dtype = chainer.get_dtype(dtype)
self._func = BinaryHierarchicalSoftmaxFunction(tree, dtype)
with self.init_scope():
self.W = variable.Parameter(uniform.Uniform(1),
(self._func.parser_size, in_size))
def device_resident_accept(self, visitor):
super(BinaryHierarchicalSoftmax, self).device_resident_accept(visitor)
self._func.device_resident_accept(visitor)
@staticmethod
def create_huffman_tree(word_counts):
"""Makes a Huffman tree from a dictionary containing word counts.
This method creates a binary Huffman tree, that is required for
:class:`BinaryHierarchicalSoftmax`.
For example, ``{0: 8, 1: 5, 2: 6, 3: 4}`` is converted to
``((3, 1), (2, 0))``.
Args:
word_counts (dict of int key and int or float values):
Dictionary representing counts of words.
Returns:
            Binary Huffman tree with tuples and keys of ``word_counts``.
"""
if not word_counts:
raise ValueError('Empty vocabulary')
q = six.moves.queue.PriorityQueue()
        # Add a unique ID to each entry so that we can compare two entries
        # with the same counts.
        # Note that iteritems orders the entries randomly.
for uid, (w, c) in enumerate(six.iteritems(word_counts)):
q.put((c, uid, w))
while q.qsize() >= 2:
(count1, id1, word1) = q.get()
(count2, id2, word2) = q.get()
count = count1 + count2
tree = (word1, word2)
q.put((count, min(id1, id2), tree))
return q.get()[2]
def forward(self, x, t):
"""Computes the loss value for given input and ground truth labels.
Args:
x (~chainer.Variable): Input to the classifier at each node.
t (~chainer.Variable): Batch of ground truth labels.
Returns:
~chainer.Variable: Loss value.
"""
f = copy.copy(self._func) # creates a copy of the function node
return f(x, t, self.W)
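# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). It builds the Huffman tree from word counts as in
# the docstring above, then evaluates the summed negative log-likelihood
# for a small batch.
import numpy as np
import chainer.links as L
tree = L.BinaryHierarchicalSoftmax.create_huffman_tree(
    {0: 8, 1: 5, 2: 6, 3: 4}) # -> ((3, 1), (2, 0))
l = L.BinaryHierarchicalSoftmax(3, tree) # in_size = 3
x = np.random.randn(2, 3).astype(np.float32)
t = np.array([0, 2], dtype=np.int32)
loss = l(x, t) # scalar loss variable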
| 12,026
| 31.860656
| 79
|
py
|
chainer
|
chainer-master/chainer/links/loss/crf1d.py
|
from chainer.functions.array import transpose_sequence
from chainer.functions.loss import crf1d
from chainer import initializers
from chainer import link
from chainer.links.rnn.n_step_rnn import argsort_list_descent
from chainer.links.rnn.n_step_rnn import permutate_list
from chainer import variable
class CRF1d(link.Link):
"""Linear-chain conditional random field loss layer.
This link wraps the :func:`~chainer.functions.crf1d` function.
It holds a transition cost matrix as a parameter.
Args:
n_label (int): Number of labels.
initial_cost (:ref:`initializer <initializer>`): Initializer to
initialize the transition cost matrix.
If this attribute is not specified,
the transition cost matrix is initialized with zeros.
.. seealso:: :func:`~chainer.functions.crf1d` for more detail.
Attributes:
cost (~chainer.Variable): Transition cost parameter.
"""
def __init__(self, n_label, initial_cost=None):
super(CRF1d, self).__init__()
if initial_cost is None:
initial_cost = initializers.constant.Zero()
with self.init_scope():
self.cost = variable.Parameter(initializer=initial_cost,
shape=(n_label, n_label))
def forward(self, xs, ys, reduce='mean', transpose=False):
"""Computes negative log-likelihood of linear-chain CRF
Args:
xs (list of Variable): Input vector for each label
ys (list of Variable): Expected output labels.
transpose (bool): If ``True``, input/output sequences
will be sorted in descending order of length.
Returns:
~chainer.Variable: A variable holding the average negative
log-likelihood of the input sequences.
        .. seealso:: See :func:`~chainer.functions.crf1d` for more detail.
"""
if transpose:
indices = argsort_list_descent(xs)
xs = permutate_list(xs, indices, inv=False)
ys = permutate_list(ys, indices, inv=False)
trans_x = transpose_sequence.transpose_sequence(xs)
trans_y = transpose_sequence.transpose_sequence(ys)
loss = crf1d.crf1d(self.cost, trans_x, trans_y, reduce)
else:
loss = crf1d.crf1d(self.cost, xs, ys, reduce)
return loss
def argmax(self, xs, transpose=False):
"""Computes a state that maximizes a joint probability.
Args:
xs (list of Variable): Input vector for each label.
transpose (bool): If ``True``, input/output sequences
will be sorted in descending order of length.
Returns:
tuple: A tuple of :class:`~chainer.Variable` representing each
log-likelihood and a list representing the argmax path.
        .. seealso:: See :func:`~chainer.functions.argmax_crf1d` for more
            detail.
"""
if transpose:
indices = argsort_list_descent(xs)
xs = permutate_list(xs, indices, inv=False)
trans_x = transpose_sequence.transpose_sequence(xs)
score, path = crf1d.argmax_crf1d(self.cost, trans_x)
path = transpose_sequence.transpose_sequence(path)
path = [p.array for p in path]
path = permutate_list(path, indices, inv=True)
else:
score, path = crf1d.argmax_crf1d(self.cost, xs)
return score, path
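# Hedged usage sketch (not part of the original module; assumes chainer and
# numpy are importable). Inputs are time-major lists: one (batch, n_label)
# array per time step for xs and one (batch,) label array per time step for
# ys.
import numpy as np
import chainer.links as L
l = L.CRF1d(n_label=3)
xs = [np.random.randn(2, 3).astype(np.float32) for _ in range(4)]
ys = [np.random.randint(0, 3, size=2).astype(np.int32) for _ in range(4)]
loss = l(xs, ys) # mean negative log-likelihood
score, path = l.argmax(xs) # Viterbi decoding
assert len(path) == 4 # one label array per time step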
| 3,495
| 34.313131
| 75
|
py
|
chainer
|
chainer-master/chainer/links/caffe/caffe_function.py
|
import warnings
import numpy
import six
from chainer import configuration
from chainer import functions
from chainer import initializer
from chainer import link
from chainer.links.caffe.protobuf3 import caffe_pb2 as caffe_pb
from chainer.links.connection import convolution_2d
from chainer.links.connection import deconvolution_2d
from chainer.links.connection import linear
from chainer.links.connection import scale
from chainer.links.normalization import batch_normalization
from chainer.utils import argument
try:
    # This method is undocumented, but is required to read large model
    # files when a user uses the C++ implementation of protobuf.
from google.protobuf.pyext import _message
_message.SetAllowOversizeProtos(True)
except ImportError:
pass
_type_to_method = {}
_oldname_to_method = {}
def _layer(typ, oldname):
def decorator(meth):
global _type_to_method
_type_to_method[typ] = meth
if oldname is not None:
typevalue = getattr(caffe_pb.V1LayerParameter, oldname)
_oldname_to_method[typevalue] = meth
return meth
return decorator
class _Blob(initializer.Initializer):
chunk_size = 1024 * 1024
def __init__(self, blob):
super(_Blob, self).__init__()
self.data = blob.data
def __call__(self, array):
array = array.ravel()
size = len(array)
indices = list(range(0, size, self.chunk_size))
# Rather than accessing Protobuf's RepeatedScalar fields directly,
        # creating an intermediate list by indexing is more efficient due to
# the implementation of the Python extension of Protobuf.
# To avoid allocating excessively large lists, we limit the length
# of lists by `chunk_size`.
for start, end in zip(indices, indices[1:] + [size]):
array[start:end] = self.data[start:end]
class _ConvolutionBlob(_Blob):
def __init__(self, blob, group):
super(_ConvolutionBlob, self).__init__(blob)
self.group = group
def __call__(self, array):
n_out, n_in = array.shape[:2]
part_out = n_out // self.group
part_in = n_in // self.group
array[...] = 0
part_size = len(self.data) // self.group
for i in six.moves.range(self.group):
out_slice = slice(i * part_out, (i + 1) * part_out)
in_slice = slice(i * part_in, (i + 1) * part_in)
w = array[out_slice, in_slice]
data = numpy.array(self.data[i * part_size:(i + 1) * part_size])
w[:] = data.reshape(w.shape)
class CaffeFunction(link.Chain):
"""Caffe emulator based on the model file of Caffe.
Given a protocol buffers file of a Caffe model, this class loads and
emulates it on :class:`~chainer.Variable` objects. It supports the official
reference models provided by BVLC.
.. note::
CaffeFunction ignores the following layers:
- Layers that CaffeFunction does not support (including data layers)
- Layers that have no top blobs
- Layers whose bottom blobs are incomplete (i.e., some or all of them
are not given nor computed)
.. warning::
It does not support full compatibility against Caffe. Some layers and
configurations are not implemented in Chainer yet, though the reference
models provided by the BVLC team are supported except data layers.
.. admonition:: Example
Consider we want to extract the (unnormalized) log class probability
of given images using BVLC reference CaffeNet. The model can be
downloaded from:
http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel
We want to compute the ``fc8`` blob from the ``data`` blob. It is simply
written as follows::
# Load the model
func = CaffeFunction('path/to/bvlc_reference_caffenet.caffemodel')
# Minibatch of size 10
x_data = numpy.ndarray((10, 3, 227, 227), dtype=numpy.float32)
... # (Fill the minibatch here)
# Forward the pre-trained net
x = Variable(x_data)
y, = func(inputs={'data': x}, outputs=['fc8'])
The result ``y`` contains the Variable corresponding to the ``fc8``
blob. The computational graph is memorized as a usual forward
computation in Chainer, so we can run backprop through this pre-trained
net.
Args:
model_path (str): Path to the binary-proto model file of Caffe.
Attributes:
forwards (dict): A mapping from layer names to corresponding functions.
"""
def __init__(self, model_path):
super(CaffeFunction, self).__init__()
net = caffe_pb.NetParameter()
with open(model_path, 'rb') as model_file:
net.MergeFromString(model_file.read())
self.forwards = {}
self.split_map = {}
self.layers = []
if net.layer:
for layer in net.layer:
meth = _type_to_method.get(layer.type)
if meth:
meth(self, layer)
else:
warnings.warn(
'Skip the layer "%s", since CaffeFunction does not '
'support %s layer' % (layer.name, layer.type))
else: # v1 format
for layer in net.layers:
meth = _oldname_to_method.get(layer.type)
if meth:
meth(self, layer)
else:
warnings.warn(
'Skip the layer "%s", since CaffeFunction does not '
'support it' % layer.name)
def forward(self, inputs, outputs, disable=(), **kwargs):
"""forward(self, inputs, outputs, disable=())
Executes a sub-network of the network.
This function acts as an interpreter of the network definition for
Caffe. On execution, it interprets each layer one by one, and if the
bottom blobs are already computed, then emulates the layer and stores
output blobs as :class:`~chainer.Variable` objects.
Args:
inputs (dict): A dictionary whose key-value pairs indicate initial
correspondences between blob names and
:class:`~chainer.Variable` objects.
outputs (Iterable): A list of blob names whose corresponding
:class:`~chainer.Variable` objects are returned.
disable (Iterable): A list of layer names that will be ignored
during the forward computation.
Returns:
tuple: A tuple of output :class:`~chainer.Variable` objects
corresponding to elements of the `outputs` argument.
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
variables = dict(inputs)
disable = set(disable)
for func_name, bottom, top in self.layers:
if (func_name in disable or
func_name not in self.forwards or
any(blob not in variables for blob in bottom)):
continue
func = self.forwards[func_name]
input_vars = tuple(variables[blob] for blob in bottom)
output_vars = func(*input_vars)
if not isinstance(output_vars, (tuple, list)):
output_vars = output_vars,
for var, name in zip(output_vars, top):
variables[name] = var
self.variables = variables
return tuple(variables[blob] for blob in outputs)
def _add_layer(self, layer):
bottom = []
for blob_name in layer.bottom:
bottom.append(self.split_map.get(blob_name, blob_name))
self.layers.append((layer.name, bottom, list(layer.top)))
@_layer('Concat', 'CONCAT')
def _setup_concat(self, layer):
param = layer.concat_param
axis = param.axis
if axis == 1 and param.concat_dim != 1:
axis = param.concat_dim
self.forwards[layer.name] = _ListArgumentFcuntion(
functions.concat, axis=axis)
self._add_layer(layer)
@_layer('Convolution', 'CONVOLUTION')
def _setup_convolution(self, layer):
blobs = layer.blobs
param = layer.convolution_param
ksize = _get_ksize(param)
stride = _get_stride(param)
pad = _get_pad(param)
num = _get_num(blobs[0])
channels = _get_channels(blobs[0])
bias_term = param.bias_term
n_in = channels * param.group
n_out = num
func = convolution_2d.Convolution2D(
n_in, n_out, ksize, stride, pad, nobias=not bias_term,
initialW=_ConvolutionBlob(blobs[0], param.group),
initial_bias=_Blob(blobs[1]) if bias_term else None)
with self.init_scope():
setattr(self, layer.name, func)
self.forwards[layer.name] = _CallChildLink(self, layer.name)
self._add_layer(layer)
@_layer('Deconvolution', 'DECONVOLUTION')
def _setup_deconvolution(self, layer):
blobs = layer.blobs
param = layer.convolution_param
ksize = _get_ksize(param)
stride = _get_stride(param)
pad = _get_pad(param)
num = _get_num(blobs[0])
channels = _get_channels(blobs[0])
bias_term = param.bias_term
n_in = num
n_out = channels * param.group
func = deconvolution_2d.Deconvolution2D(
n_in, n_out, ksize, stride, pad, nobias=not bias_term,
initialW=_ConvolutionBlob(blobs[0], param.group),
initial_bias=_Blob(blobs[1]) if bias_term else None)
with self.init_scope():
setattr(self, layer.name, func)
self.forwards[layer.name] = _CallChildLink(self, layer.name)
self._add_layer(layer)
@_layer('Data', 'DATA')
def _setup_data(self, layer):
# We silently skip the data layer.
pass
@_layer('Dropout', 'DROPOUT')
def _setup_dropout(self, layer):
param = layer.dropout_param
self.forwards[layer.name] = _SingleArgumentFunction(
functions.dropout, ratio=param.dropout_ratio)
self._add_layer(layer)
@_layer('InnerProduct', 'INNER_PRODUCT')
def _setup_inner_product(self, layer):
param = layer.inner_product_param
bias_term = param.bias_term
if param.axis != 1:
raise RuntimeError(
'Non-default axis in InnerProduct is not supported')
blobs = layer.blobs
width, height = _get_width(blobs[0]), _get_height(blobs[0])
func = linear.Linear(
width, height, nobias=not bias_term,
initialW=_Blob(blobs[0]),
initial_bias=_Blob(blobs[1]) if bias_term else None)
with self.init_scope():
setattr(self, layer.name, func)
self.forwards[layer.name] = _CallChildLink(self, layer.name)
self._add_layer(layer)
@_layer('LRN', 'LRN')
def _setup_lrn(self, layer):
param = layer.lrn_param
if param.norm_region != param.ACROSS_CHANNELS:
raise RuntimeError('Within-channel LRN is not supported')
fwd = _SingleArgumentFunction(
functions.local_response_normalization,
n=param.local_size, k=param.k,
alpha=param.alpha / param.local_size, beta=param.beta)
self.forwards[layer.name] = fwd
self._add_layer(layer)
@_layer('Pooling', 'POOLING')
def _setup_pooling(self, layer):
param = layer.pooling_param
ksize = _get_ksize(param)
stride = _get_stride(param)
pad = _get_pad(param)
if param.pool == param.MAX:
func = functions.max_pooling_2d
elif param.pool == param.AVE:
func = functions.average_pooling_2d
else:
raise RuntimeError('Stochastic pooling is not supported')
if param.global_pooling and not ksize:
            # If global_pooling is set and no kernel size is given, the
            # kernel size is computed dynamically to cover the whole input
            # feature map.
def _func(x, stride, pad):
return func(x, x.shape[2:], stride=stride, pad=pad)
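            # Sketch (hypothetical shape): for a (N, C, 7, 7) input,
            # x.shape[2:] == (7, 7), so the whole feature map is pooled
            # down to spatial size (1, 1), as Caffe's global_pooling does.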
fw = _SingleArgumentFunction(_func, stride=stride, pad=pad)
else:
fw = _SingleArgumentFunction(func, ksize, stride=stride, pad=pad)
self.forwards[layer.name] = fw
self._add_layer(layer)
@_layer('ReLU', 'RELU')
def _setup_relu(self, layer):
slope = layer.relu_param.negative_slope
if slope != 0:
fw = _SingleArgumentFunction(functions.leaky_relu, slope=slope)
else:
fw = functions.relu
self.forwards[layer.name] = fw
self._add_layer(layer)
@_layer('Reshape', None)
def _setup_reshape(self, layer):
shape = layer.reshape_param.shape.dim
fw = _SingleArgumentFunction(functions.reshape, shape=shape)
self.forwards[layer.name] = fw
self._add_layer(layer)
@_layer('BatchNorm', None)
def _setup_batchnorm(self, layer):
# Get layer parameters.
blobs = layer.blobs
param = layer.batch_norm_param
use_global_stats = param.use_global_stats
decay = param.moving_average_fraction
eps = param.eps
size = int(blobs[0].shape.dim[0]) # Get channel dim from mean blob.
# Make BatchNormalization link.
func = batch_normalization.BatchNormalization(
size, decay=decay, eps=eps, use_gamma=False, use_beta=False)
_Blob(blobs[0])(func.avg_mean)
_Blob(blobs[1])(func.avg_var)
        # Scale the means and variances if a scaling factor is appended to
        # the blobs to correctly mimic the behavior of Caffe. See
        # https://github.com/BVLC/caffe/issues/4885
if len(blobs) >= 3:
scaling_factor = blobs[2].data
func.avg_mean /= scaling_factor[0]
func.avg_var /= scaling_factor[0]
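            # For example (hypothetical numbers): if blobs[2].data == [100.],
            # a stored running mean of 5000. becomes 50., matching Caffe's
            # normalization by the accumulated averaging factor at test time.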
with self.init_scope():
setattr(self, layer.name, func)
# Add layer.
if use_global_stats:
func_class = _SingleArgumentFunctionTestMode
else:
func_class = _SingleArgumentFunction
fwd = func_class(_CallChildLink(self, layer.name), finetune=False)
self.forwards[layer.name] = fwd
self._add_layer(layer)
@_layer('Eltwise', 'ELTWISE')
def _setup_eltwise(self, layer):
        # The stable_prod_grad parameter is currently not supported.
operation = layer.eltwise_param.operation
coeffs = layer.eltwise_param.coeff or None
self.forwards[layer.name] = _EltwiseFunction(operation, coeffs)
self._add_layer(layer)
@_layer('Scale', None)
def _setup_scale(self, layer):
        # The following parameters are currently not supported:
        # - negative axis
        # - num_axes
        # - filler
        # - bias_filler
# Get layer parameters.
bottom = layer.bottom
blobs = layer.blobs
axis = layer.scale_param.axis
bias_term = layer.scale_param.bias_term
# Case of only one bottom where W is learnt parameter.
if len(bottom) == 1:
W_shape = blobs[0].shape.dim
func = scale.Scale(axis, W_shape, bias_term)
_Blob(blobs[0])(func.W.data)
if bias_term:
_Blob(blobs[1])(func.bias.b.data)
# Case of two bottoms where W is given as a bottom.
else:
shape = blobs[0].shape.dim if bias_term else None
func = scale.Scale(
axis, bias_term=bias_term, bias_shape=shape)
if bias_term:
_Blob(blobs[0])(func.bias.b.data)
# Add layer.
with self.init_scope():
setattr(self, layer.name, func)
self.forwards[layer.name] = _CallChildLink(self, layer.name)
self._add_layer(layer)
@_layer('Slice', 'SLICE')
def _setup_slice(self, layer):
if layer.slice_param.HasField('axis'):
axis = layer.slice_param.axis
elif layer.slice_param.HasField('slice_dim'):
axis = layer.slice_param.slice_dim
else:
axis = 1
if layer.slice_param.slice_point:
indices_or_sections = list(layer.slice_param.slice_point)
else:
indices_or_sections = len(list(layer.top))
self.forwards[layer.name] = _SingleArgumentFunction(
functions.split_axis,
indices_or_sections=indices_or_sections,
axis=axis
)
self._add_layer(layer)
@_layer('Softmax', 'SOFTMAX')
def _setup_softmax(self, layer):
if layer.softmax_param.axis != 1:
raise RuntimeError(
'Softmax along non-channel axis is not supported')
if layer.softmax_param.engine == 0: # DEFAULT
fw = functions.softmax
elif layer.softmax_param.engine == 1: # CAFFE
fw = _SingleArgumentFunctionWithCudnn(False, functions.softmax)
elif layer.softmax_param.engine == 2: # CUDNN
fw = _SingleArgumentFunctionWithCudnn(True, functions.softmax)
self.forwards[layer.name] = fw
self._add_layer(layer)
@_layer('Sigmoid', 'SIGMOID')
def _setup_sigmoid(self, layer):
if layer.sigmoid_param.engine == 0: # DEFAULT
fw = functions.sigmoid
elif layer.sigmoid_param.engine == 1: # CAFFE
fw = _SingleArgumentFunctionWithCudnn(False, functions.sigmoid)
elif layer.sigmoid_param.engine == 2: # CUDNN
fw = _SingleArgumentFunctionWithCudnn(True, functions.sigmoid)
self.forwards[layer.name] = fw
self._add_layer(layer)
@_layer('SoftmaxWithLoss', 'SOFTMAX_LOSS')
def _setup_softmax_with_loss(self, layer):
if layer.softmax_param.axis != 1:
raise RuntimeError(
'Softmax along non-channel axis is not supported')
self.forwards[layer.name] = functions.softmax_cross_entropy
self._add_layer(layer)
@_layer('Split', 'SPLIT')
def _setup_split(self, layer):
for top in layer.top:
self.split_map[top] = layer.bottom[0]
# Internal functions
def _get_ksize(param):
if param.kernel_h > 0:
return param.kernel_h, param.kernel_w
elif type(param.kernel_size) == int:
return param.kernel_size
elif len(param.kernel_size) == 1:
return param.kernel_size[0]
else:
return param.kernel_size
def _get_stride(param):
if param.stride_h > 0:
return param.stride_h, param.stride_w
elif type(param.stride) == int:
return param.stride
elif len(param.stride) == 0:
return 1
elif len(param.stride) == 1:
return param.stride[0]
else:
return param.stride
def _get_pad(param):
if param.pad_h > 0 or param.pad_w > 0:
return param.pad_h, param.pad_w
elif type(param.pad) == int:
return param.pad
elif len(param.pad) == 0:
return 0
elif len(param.pad) == 1:
return param.pad[0]
else:
return param.pad
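# A quick sanity sketch of the precedence implemented above (hypothetical
# values; ConvolutionParameter comes from the generated caffe_pb2 module):
#
#   param = caffe_pb2.ConvolutionParameter(kernel_size=[3], stride=[2])
#   _get_ksize(param)    # -> 3      (a single repeated entry is unpacked)
#   _get_stride(param)   # -> 2
#   _get_pad(param)      # -> 0      (an empty repeated field falls back to 0)
#   param.pad_h = 1
#   _get_pad(param)      # -> (1, 0) (explicit pad_h/pad_w take precedence)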
def _get_num(blob):
if blob.num > 0:
return blob.num
else:
return blob.shape.dim[0]
def _get_channels(blob):
if blob.channels > 0:
return blob.channels
else:
return blob.shape.dim[1]
def _get_height(blob):
if blob.height > 0:
return blob.height
elif len(blob.shape.dim) == 2:
return blob.shape.dim[0]
elif len(blob.shape.dim) == 4:
return blob.shape.dim[2]
else:
raise RuntimeError(
'{}-dimensional array is not supported'.format(
len(blob.shape.dim)))
def _get_width(blob):
if blob.width > 0:
return blob.width
elif len(blob.shape.dim) == 2:
return blob.shape.dim[1]
elif len(blob.shape.dim) == 4:
return blob.shape.dim[3]
else:
raise RuntimeError(
'{}-dimensional array is not supported'.format(
len(blob.shape.dim)))
# Internal classes
# Each __call__ must return a Variable or a tuple
class _SingleArgumentFunction(object):
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __call__(self, x):
return self.func(x, *self.args, **self.kwargs)
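# Sketch: _SingleArgumentFunction behaves like functools.partial, e.g.
# _SingleArgumentFunction(functions.dropout, ratio=0.4)(x) is equivalent to
# functions.dropout(x, ratio=0.4). The subclasses below additionally wrap
# the call in a chainer configuration scope (train mode or cuDNN use).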
class _SingleArgumentFunctionTestMode(_SingleArgumentFunction):
def __call__(self, x):
with configuration.using_config('train', False):
return super(_SingleArgumentFunctionTestMode, self).__call__(x)
class _ListArgumentFunction(object):
def __init__(self, func, **kwargs):
self.func = func
self.kwargs = kwargs
def __call__(self, *xs):
return self.func(xs, **self.kwargs)
class _SingleArgumentFunctionWithCudnn(_SingleArgumentFunction):
def __init__(self, use_cudnn, func, *args, **kwargs):
super(_SingleArgumentFunctionWithCudnn, self).__init__(
func, *args, **kwargs)
self.use_cudnn = use_cudnn
def __call__(self, x):
with configuration.using_config('use_cudnn', self.use_cudnn):
return super(_SingleArgumentFunctionWithCudnn, self).__call__(x)
class _CallChildLink(object):
def __init__(self, caffe_func, name):
self.name = name
self.caffe_func = caffe_func
def __call__(self, *xs, **kwargs):
return self.caffe_func[self.name](*xs, **kwargs)
class _EltwiseFunction(object):
def __init__(self, operation, coeffs=None):
if coeffs is not None:
assert len(coeffs) > 0
self.operation = operation
self.coeffs = coeffs
def __call__(self, *xs):
operation = self.operation
if operation == 0: # PROD
return six.moves.reduce(lambda x, y: x * y, xs),
elif operation == 1: # SUM
coeffs = self.coeffs
if coeffs is not None:
assert len(xs) == len(coeffs)
xs = [x * coeff for x, coeff in zip(xs, coeffs)]
return six.moves.reduce(lambda x, y: x + y, xs),
elif operation == 2: # MAX
return six.moves.reduce(lambda x, y: functions.maximum(x, y), xs),
else:
raise ValueError('Invalid EltwiseParameter.EltwiseOp value.')
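# End-to-end usage sketch (the model file and blob names below are
# hypothetical; a real .caffemodel binary is required):
#
#   import numpy as np
#   import chainer
#   from chainer.links.caffe import CaffeFunction
#
#   func = CaffeFunction('bvlc_googlenet.caffemodel')
#   x = chainer.Variable(np.zeros((1, 3, 224, 224), dtype=np.float32))
#   y, = func(inputs={'data': x}, outputs=['loss3/classifier'])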
| 22528
| 31.841108
| 79
|
py
|
chainer
|
chainer-master/chainer/links/caffe/__init__.py
|
from chainer.links.caffe.caffe_function import CaffeFunction # NOQA
| 69
| 34
| 68
|
py
|
chainer
|
chainer-master/chainer/links/caffe/protobuf3/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/links/caffe/protobuf3/caffe_pb2.py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: caffe.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='caffe.proto',
package='caffe',
syntax='proto2',
serialized_pb=_b('\n\x0b\x63\x61\x66\x66\x65.proto\x12\x05\x63\x61\x66\x66\x65\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xcc\x01\n\tBlobProto\x12\x1f\n\x05shape\x18\x07 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"2\n\x0f\x42lobProtoVector\x12\x1f\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x10.caffe.BlobProto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"\x8a\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 \x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x42\n\rvariance_norm\x18\x08 \x01(\x0e\x32#.caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\x8e\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12%\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x05state\x18\x06 \x01(\x0b\x32\x0f.caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12$\n\x05layer\x18\x64 \x03(\x0b\x32\x15.caffe.LayerParameter\x12\'\n\x06layers\x18\x02 \x03(\x0b\x32\x17.caffe.V1LayerParameter\"\x9c\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12&\n\tnet_param\x18\x19 \x01(\x0b\x32\x13.caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12,\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x13.caffe.NetParameter\x12+\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x13.caffe.NetParameter\x12$\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x0f.caffe.NetState\x12#\n\ntest_state\x18\x1b \x03(\x0b\x32\x0f.caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! 
\x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12K\n\x0fsnapshot_format\x18% \x01(\x0e\x32%.caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12;\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32!.caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x14\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x05\x31\x65-08\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x11\n\trms_decay\x18& \x01(\x02\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12;\n\x0bsolver_type\x18\x1e \x01(\x0e\x32!.caffe.SolverParameter.SolverType:\x03SGD\"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05\"l\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12!\n\x07history\x18\x03 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 \x01(\x05:\x01\x30\"N\n\x08NetState\x12!\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"s\n\x0cNetStateRule\x12\x1b\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t\"\xa3\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\nshare_mode\x18\x02 \x01(\x0e\x32\x1d.caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xc0\x12\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1b\n\x05phase\x18\n \x01(\x0e\x32\x0c.caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\x1f\n\x05param\x18\x06 \x03(\x0b\x32\x10.caffe.ParamSpec\x12\x1f\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12$\n\x07include\x18\x08 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18\t \x03(\x0b\x32\x13.caffe.NetStateRule\x12\x37\n\x0ftransform_param\x18\x64 \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18\x65 \x01(\x0b\x32\x14.caffe.LossParameter\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18g \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12\x34\n\x10\x62\x61tch_norm_param\x18\x8b\x01 
\x01(\x0b\x32\x19.caffe.BatchNormParameter\x12)\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x14.caffe.BiasParameter\x12,\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18k \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18l \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18n \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12\'\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x13.caffe.ELUParameter\x12+\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x15.caffe.EmbedParameter\x12&\n\texp_param\x18o \x01(\x0b\x32\x13.caffe.ExpParameter\x12/\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x17.caffe.FlattenParameter\x12\x31\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18s \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18u \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12\'\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x13.caffe.LogParameter\x12&\n\tlrn_param\x18v \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18w \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18x \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18y \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18z \x01(\x0b\x32\x15.caffe.PowerParameter\x12+\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x15.caffe.PReLUParameter\x12-\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x16.caffe.PythonParameter\x12\x33\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x19.caffe.ReductionParameter\x12(\n\nrelu_param\x18{ \x01(\x0b\x32\x14.caffe.ReLUParameter\x12/\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x17.caffe.ReshapeParameter\x12+\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x15.caffe.ScaleParameter\x12.\n\rsigmoid_param\x18| \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18} \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12\'\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x13.caffe.SPPParameter\x12*\n\x0bslice_param\x18~ \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18\x7f \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x33\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12)\n\ntile_param\x18\x8a\x01 \x01(\x0b\x32\x14.caffe.TileParameter\x12\x36\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\"\xb6\x01\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\"\xc2\x01\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12\x44\n\rnormalization\x18\x03 \x01(\x0e\x32&.caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 
\x01(\x08\"B\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\x08\n\x04NONE\x10\x03\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x05\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31\"j\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12&\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x05\x30.999\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-05\"]\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\xfc\x03\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12-\n\rweight_filler\x18\x07 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x16.caffe.FillerParameter\x12;\n\x06\x65ngine\x18\x0f \x01(\x0e\x32\".caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 \x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xa4\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x31\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x17.caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x34\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\".\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\"\xa0\x01\n\x12\x44ummyDataParameter\x12+\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x16.caffe.FillerParameter\x12\x1f\n\x05shape\x18\x06 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xa5\x01\n\x10\x45ltwiseParameter\x12\x39\n\toperation\x18\x01 \x01(\x0e\x32!.caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true\"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 
\x01(\x02:\x01\x31\"\xac\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"^\n\x12HingeLossParameter\x12\x30\n\x04norm\x18\x01 \x01(\x0e\x32\x1e.caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"\'\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\"\xb1\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xb8\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12\x44\n\x0bnorm_region\x18\x04 \x01(\x0e\x32\x1e.caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\"d\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-09\"\xa2\x03\n\x10PoolingParameter\x12\x35\n\x04pool\x18\x01 \x01(\x0e\x32\".caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 
\x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12\x37\n\x06\x65ngine\x18\x0b \x01(\x0e\x32\x1e.caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xad\x01\n\x12ReductionParameter\x12=\n\toperation\x18\x01 \x01(\x0e\x32%.caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 \x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x8d\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x34\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1b.caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x10ReshapeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"\xa5\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"x\n\x10SigmoidParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\"\x89\x01\n\x10SoftmaxParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"r\n\rTanHParameter\x12\x34\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1b.caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"/\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 
\x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xeb\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x31\n\x04pool\x18\x02 \x01(\x0e\x32\x1e.caffe.SPPParameter.PoolMethod:\x03MAX\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xe0\x13\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12$\n\x07include\x18 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18! \x03(\x0b\x32\x13.caffe.NetStateRule\x12/\n\x04type\x18\x05 \x01(\x0e\x32!.caffe.V1LayerParameter.LayerType\x12\x1f\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12>\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32$.caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12,\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18\x0b \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18\x0c \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18\x18 \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18) \x01(\x0b\x32\x13.caffe.ExpParameter\x12\x31\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12&\n\tlrn_param\x18\x12 \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18\" \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18\x13 \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x15.caffe.PowerParameter\x12(\n\nrelu_param\x18\x1e \x01(\x0b\x32\x14.caffe.ReLUParameter\x12.\n\rsigmoid_param\x18& \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18\' \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12*\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18% \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x32\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x35\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x37\n\x0ftransform_param\x18$ 
\x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18* \x01(\x0b\x32\x14.caffe.LossParameter\x12&\n\x05layer\x18\x01 \x01(\x0b\x32\x17.caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 \x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xfd\x07\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x35\n\x04pool\x18\x0b \x01(\x0e\x32\".caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? 
\x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x36\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"W\n\x0ePReLUParameter\x12&\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PHASE = _descriptor.EnumDescriptor(
name='Phase',
full_name='caffe.Phase',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TRAIN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TEST', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=14776,
serialized_end=14804,
)
_sym_db.RegisterEnumDescriptor(_PHASE)
Phase = enum_type_wrapper.EnumTypeWrapper(_PHASE)
TRAIN = 0
TEST = 1
_FILLERPARAMETER_VARIANCENORM = _descriptor.EnumDescriptor(
name='VarianceNorm',
full_name='caffe.FillerParameter.VarianceNorm',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FAN_IN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAN_OUT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVERAGE', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=658,
serialized_end=710,
)
_sym_db.RegisterEnumDescriptor(_FILLERPARAMETER_VARIANCENORM)
_SOLVERPARAMETER_SNAPSHOTFORMAT = _descriptor.EnumDescriptor(
name='SnapshotFormat',
full_name='caffe.SolverParameter.SnapshotFormat',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='HDF5', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BINARYPROTO', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2132,
serialized_end=2175,
)
_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SNAPSHOTFORMAT)
_SOLVERPARAMETER_SOLVERMODE = _descriptor.EnumDescriptor(
name='SolverMode',
full_name='caffe.SolverParameter.SolverMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='CPU', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GPU', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2177,
serialized_end=2207,
)
_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERMODE)
_SOLVERPARAMETER_SOLVERTYPE = _descriptor.EnumDescriptor(
name='SolverType',
full_name='caffe.SolverParameter.SolverType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SGD', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NESTEROV', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADAGRAD', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RMSPROP', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADADELTA', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ADAM', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2209,
serialized_end=2294,
)
_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERTYPE)
_PARAMSPEC_DIMCHECKMODE = _descriptor.EnumDescriptor(
name='DimCheckMode',
full_name='caffe.ParamSpec.DimCheckMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STRICT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERMISSIVE', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2725,
serialized_end=2767,
)
_sym_db.RegisterEnumDescriptor(_PARAMSPEC_DIMCHECKMODE)
_LOSSPARAMETER_NORMALIZATIONMODE = _descriptor.EnumDescriptor(
name='NormalizationMode',
full_name='caffe.LossParameter.NormalizationMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FULL', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VALID', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BATCH_SIZE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NONE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5454,
serialized_end=5520,
)
_sym_db.RegisterEnumDescriptor(_LOSSPARAMETER_NORMALIZATIONMODE)
_CONVOLUTIONPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.ConvolutionParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6485,
serialized_end=6528,
)
_sym_db.RegisterEnumDescriptor(_CONVOLUTIONPARAMETER_ENGINE)
_DATAPARAMETER_DB = _descriptor.EnumDescriptor(
name='DB',
full_name='caffe.DataParameter.DB',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='LEVELDB', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LMDB', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6796,
serialized_end=6823,
)
_sym_db.RegisterEnumDescriptor(_DATAPARAMETER_DB)
_ELTWISEPARAMETER_ELTWISEOP = _descriptor.EnumDescriptor(
name='EltwiseOp',
full_name='caffe.EltwiseParameter.EltwiseOp',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PROD', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUM', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MAX', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=7163,
serialized_end=7202,
)
_sym_db.RegisterEnumDescriptor(_ELTWISEPARAMETER_ELTWISEOP)
_HINGELOSSPARAMETER_NORM = _descriptor.EnumDescriptor(
name='Norm',
full_name='caffe.HingeLossParameter.Norm',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='L1', index=0, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='L2', index=1, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=7737,
serialized_end=7759,
)
_sym_db.RegisterEnumDescriptor(_HINGELOSSPARAMETER_NORM)
_LRNPARAMETER_NORMREGION = _descriptor.EnumDescriptor(
name='NormRegion',
full_name='caffe.LRNParameter.NormRegion',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ACROSS_CHANNELS', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WITHIN_CHANNEL', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=8549,
serialized_end=8602,
)
_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_NORMREGION)
_LRNPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.LRNParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6485,
serialized_end=6528,
)
_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_ENGINE)
_POOLINGPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
name='PoolMethod',
full_name='caffe.PoolingParameter.PoolMethod',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MAX', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOCHASTIC', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9171,
serialized_end=9217,
)
_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_POOLMETHOD)
_POOLINGPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.PoolingParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6485,
serialized_end=6528,
)
_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_ENGINE)
_REDUCTIONPARAMETER_REDUCTIONOP = _descriptor.EnumDescriptor(
name='ReductionOp',
full_name='caffe.ReductionParameter.ReductionOp',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SUM', index=0, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ASUM', index=1, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUMSQ', index=2, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MEAN', index=3, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9562,
serialized_end=9615,
)
_sym_db.RegisterEnumDescriptor(_REDUCTIONPARAMETER_REDUCTIONOP)
_RELUPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.ReLUParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6485,
serialized_end=6528,
)
_sym_db.RegisterEnumDescriptor(_RELUPARAMETER_ENGINE)
_SIGMOIDPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.SigmoidParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6485,
serialized_end=6528,
)
_sym_db.RegisterEnumDescriptor(_SIGMOIDPARAMETER_ENGINE)
_SOFTMAXPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.SoftmaxParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6485,
serialized_end=6528,
)
_sym_db.RegisterEnumDescriptor(_SOFTMAXPARAMETER_ENGINE)
_TANHPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.TanHParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6485,
serialized_end=6528,
)
_sym_db.RegisterEnumDescriptor(_TANHPARAMETER_ENGINE)
_SPPPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
name='PoolMethod',
full_name='caffe.SPPParameter.PoolMethod',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MAX', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOCHASTIC', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9171,
serialized_end=9217,
)
_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_POOLMETHOD)
_SPPPARAMETER_ENGINE = _descriptor.EnumDescriptor(
name='Engine',
full_name='caffe.SPPParameter.Engine',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAFFE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUDNN', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=6485,
serialized_end=6528,
)
_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_ENGINE)
_V1LAYERPARAMETER_LAYERTYPE = _descriptor.EnumDescriptor(
name='LayerType',
full_name='caffe.V1LayerParameter.LayerType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ABSVAL', index=1, number=35,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCURACY', index=2, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ARGMAX', index=3, number=30,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BNLL', index=4, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONCAT', index=5, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONTRASTIVE_LOSS', index=6, number=37,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CONVOLUTION', index=7, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA', index=8, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DECONVOLUTION', index=9, number=39,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DROPOUT', index=10, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DUMMY_DATA', index=11, number=32,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EUCLIDEAN_LOSS', index=12, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ELTWISE', index=13, number=25,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXP', index=14, number=38,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLATTEN', index=15, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HDF5_DATA', index=16, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HDF5_OUTPUT', index=17, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HINGE_LOSS', index=18, number=28,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IM2COL', index=19, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMAGE_DATA', index=20, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INFOGAIN_LOSS', index=21, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INNER_PRODUCT', index=22, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LRN', index=23, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MEMORY_DATA', index=24, number=29,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MULTINOMIAL_LOGISTIC_LOSS', index=25, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MVN', index=26, number=34,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POOLING', index=27, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POWER', index=28, number=26,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RELU', index=29, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGMOID', index=30, number=19,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGMOID_CROSS_ENTROPY_LOSS', index=31, number=27,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SILENCE', index=32, number=36,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SOFTMAX', index=33, number=20,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SOFTMAX_LOSS', index=34, number=21,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SPLIT', index=35, number=22,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SLICE', index=36, number=33,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TANH', index=37, number=23,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WINDOW_DATA', index=38, number=24,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='THRESHOLD', index=39, number=31,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=13017,
serialized_end=13617,
)
_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_LAYERTYPE)
_V1LAYERPARAMETER_DIMCHECKMODE = _descriptor.EnumDescriptor(
name='DimCheckMode',
full_name='caffe.V1LayerParameter.DimCheckMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STRICT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERMISSIVE', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2725,
serialized_end=2767,
)
_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_DIMCHECKMODE)
_V0LAYERPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor(
name='PoolMethod',
full_name='caffe.V0LayerParameter.PoolMethod',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MAX', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STOCHASTIC', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=9171,
serialized_end=9217,
)
_sym_db.RegisterEnumDescriptor(_V0LAYERPARAMETER_POOLMETHOD)
_BLOBSHAPE = _descriptor.Descriptor(
name='BlobShape',
full_name='caffe.BlobShape',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dim', full_name='caffe.BlobShape.dim', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=50,
)
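# The options blob _b('\020\001') attached to repeated scalar fields such as
# BlobShape.dim is a serialized FieldOptions message: byte 0x10 is the tag
# for field 2 (`packed`) and 0x01 sets it to true, so these fields use the
# packed wire encoding.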
_BLOBPROTO = _descriptor.Descriptor(
name='BlobProto',
full_name='caffe.BlobProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shape', full_name='caffe.BlobProto.shape', index=0,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data', full_name='caffe.BlobProto.data', index=1,
number=5, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='diff', full_name='caffe.BlobProto.diff', index=2,
number=6, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='double_data', full_name='caffe.BlobProto.double_data', index=3,
number=8, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='double_diff', full_name='caffe.BlobProto.double_diff', index=4,
number=9, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
_descriptor.FieldDescriptor(
name='num', full_name='caffe.BlobProto.num', index=5,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='channels', full_name='caffe.BlobProto.channels', index=6,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='height', full_name='caffe.BlobProto.height', index=7,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='width', full_name='caffe.BlobProto.width', index=8,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=53,
serialized_end=257,
)
_BLOBPROTOVECTOR = _descriptor.Descriptor(
name='BlobProtoVector',
full_name='caffe.BlobProtoVector',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='blobs', full_name='caffe.BlobProtoVector.blobs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=259,
serialized_end=309,
)
_DATUM = _descriptor.Descriptor(
name='Datum',
full_name='caffe.Datum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='channels', full_name='caffe.Datum.channels', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='height', full_name='caffe.Datum.height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='width', full_name='caffe.Datum.width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data', full_name='caffe.Datum.data', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label', full_name='caffe.Datum.label', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='float_data', full_name='caffe.Datum.float_data', index=5,
number=6, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encoded', full_name='caffe.Datum.encoded', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=312,
serialized_end=441,
)
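# A minimal decoding sketch (hypothetical usage, not part of this generated
# file): once the symbol database has built the message classes, a serialized
# Datum record (e.g. an LMDB value written by Caffe tools) can be parsed with
# the standard protobuf API:
#
#   datum = caffe_pb2.Datum()
#   datum.ParseFromString(raw_bytes)          # raw_bytes: one DB value
#   pixels = datum.data or datum.float_data   # bytes or repeated float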
_FILLERPARAMETER = _descriptor.Descriptor(
name='FillerParameter',
full_name='caffe.FillerParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='caffe.FillerParameter.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("constant").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='caffe.FillerParameter.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min', full_name='caffe.FillerParameter.min', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max', full_name='caffe.FillerParameter.max', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mean', full_name='caffe.FillerParameter.mean', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='std', full_name='caffe.FillerParameter.std', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sparse', full_name='caffe.FillerParameter.sparse', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='variance_norm', full_name='caffe.FillerParameter.variance_norm', index=7,
number=8, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FILLERPARAMETER_VARIANCENORM,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=444,
serialized_end=710,
)
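# FillerParameter mirrors the proto2 defaults declared in caffe.proto: fields
# with has_default_value=True carry them explicitly here (type="constant",
# max=1, std=1, sparse=-1), so reading an unset field yields its declared
# default rather than a zero value.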
_NETPARAMETER = _descriptor.Descriptor(
name='NetParameter',
full_name='caffe.NetParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='caffe.NetParameter.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='input', full_name='caffe.NetParameter.input', index=1,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='input_shape', full_name='caffe.NetParameter.input_shape', index=2,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='input_dim', full_name='caffe.NetParameter.input_dim', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='force_backward', full_name='caffe.NetParameter.force_backward', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='caffe.NetParameter.state', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='debug_info', full_name='caffe.NetParameter.debug_info', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='layer', full_name='caffe.NetParameter.layer', index=7,
number=100, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='layers', full_name='caffe.NetParameter.layers', index=8,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=713,
serialized_end=983,
)
_SOLVERPARAMETER = _descriptor.Descriptor(
name='SolverParameter',
full_name='caffe.SolverParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='net', full_name='caffe.SolverParameter.net', index=0,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='net_param', full_name='caffe.SolverParameter.net_param', index=1,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_net', full_name='caffe.SolverParameter.train_net', index=2,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_net', full_name='caffe.SolverParameter.test_net', index=3,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_net_param', full_name='caffe.SolverParameter.train_net_param', index=4,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_net_param', full_name='caffe.SolverParameter.test_net_param', index=5,
number=22, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='train_state', full_name='caffe.SolverParameter.train_state', index=6,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_state', full_name='caffe.SolverParameter.test_state', index=7,
number=27, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_iter', full_name='caffe.SolverParameter.test_iter', index=8,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_interval', full_name='caffe.SolverParameter.test_interval', index=9,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_compute_loss', full_name='caffe.SolverParameter.test_compute_loss', index=10,
number=19, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='test_initialization', full_name='caffe.SolverParameter.test_initialization', index=11,
number=32, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='base_lr', full_name='caffe.SolverParameter.base_lr', index=12,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display', full_name='caffe.SolverParameter.display', index=13,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='average_loss', full_name='caffe.SolverParameter.average_loss', index=14,
number=33, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_iter', full_name='caffe.SolverParameter.max_iter', index=15,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='iter_size', full_name='caffe.SolverParameter.iter_size', index=16,
number=36, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lr_policy', full_name='caffe.SolverParameter.lr_policy', index=17,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gamma', full_name='caffe.SolverParameter.gamma', index=18,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='power', full_name='caffe.SolverParameter.power', index=19,
number=10, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='momentum', full_name='caffe.SolverParameter.momentum', index=20,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_decay', full_name='caffe.SolverParameter.weight_decay', index=21,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='regularization_type', full_name='caffe.SolverParameter.regularization_type', index=22,
number=29, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("L2").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stepsize', full_name='caffe.SolverParameter.stepsize', index=23,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stepvalue', full_name='caffe.SolverParameter.stepvalue', index=24,
number=34, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='clip_gradients', full_name='caffe.SolverParameter.clip_gradients', index=25,
number=35, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(-1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='snapshot', full_name='caffe.SolverParameter.snapshot', index=26,
number=14, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='snapshot_prefix', full_name='caffe.SolverParameter.snapshot_prefix', index=27,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='snapshot_diff', full_name='caffe.SolverParameter.snapshot_diff', index=28,
number=16, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='snapshot_format', full_name='caffe.SolverParameter.snapshot_format', index=29,
number=37, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='solver_mode', full_name='caffe.SolverParameter.solver_mode', index=30,
number=17, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_id', full_name='caffe.SolverParameter.device_id', index=31,
number=18, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='random_seed', full_name='caffe.SolverParameter.random_seed', index=32,
number=20, type=3, cpp_type=2, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='caffe.SolverParameter.type', index=33,
number=40, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("SGD").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='delta', full_name='caffe.SolverParameter.delta', index=34,
number=31, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1e-08),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='momentum2', full_name='caffe.SolverParameter.momentum2', index=35,
number=39, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.999),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rms_decay', full_name='caffe.SolverParameter.rms_decay', index=36,
number=38, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='debug_info', full_name='caffe.SolverParameter.debug_info', index=37,
number=23, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='snapshot_after_train', full_name='caffe.SolverParameter.snapshot_after_train', index=38,
number=28, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='solver_type', full_name='caffe.SolverParameter.solver_type', index=39,
number=30, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SOLVERPARAMETER_SNAPSHOTFORMAT,
_SOLVERPARAMETER_SOLVERMODE,
_SOLVERPARAMETER_SOLVERTYPE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=986,
serialized_end=2294,
)
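# serialized_start/serialized_end are byte offsets of each message definition
# within DESCRIPTOR.serialized_pb (the serialized FileDescriptorProto for
# caffe.proto); the pure-Python runtime uses them, e.g. in
# Descriptor.CopyToProto(), to slice out a single definition without
# re-parsing the whole file descriptor.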
_SOLVERSTATE = _descriptor.Descriptor(
name='SolverState',
full_name='caffe.SolverState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='iter', full_name='caffe.SolverState.iter', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='learned_net', full_name='caffe.SolverState.learned_net', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='history', full_name='caffe.SolverState.history', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='current_step', full_name='caffe.SolverState.current_step', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2296,
serialized_end=2404,
)
_NETSTATE = _descriptor.Descriptor(
name='NetState',
full_name='caffe.NetState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='phase', full_name='caffe.NetState.phase', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='level', full_name='caffe.NetState.level', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stage', full_name='caffe.NetState.stage', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2406,
serialized_end=2484,
)
_NETSTATERULE = _descriptor.Descriptor(
name='NetStateRule',
full_name='caffe.NetStateRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='phase', full_name='caffe.NetStateRule.phase', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_level', full_name='caffe.NetStateRule.min_level', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_level', full_name='caffe.NetStateRule.max_level', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stage', full_name='caffe.NetStateRule.stage', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='not_stage', full_name='caffe.NetStateRule.not_stage', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2486,
serialized_end=2601,
)
_PARAMSPEC = _descriptor.Descriptor(
name='ParamSpec',
full_name='caffe.ParamSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='caffe.ParamSpec.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='share_mode', full_name='caffe.ParamSpec.share_mode', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lr_mult', full_name='caffe.ParamSpec.lr_mult', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='decay_mult', full_name='caffe.ParamSpec.decay_mult', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_PARAMSPEC_DIMCHECKMODE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2604,
serialized_end=2767,
)
_LAYERPARAMETER = _descriptor.Descriptor(
name='LayerParameter',
full_name='caffe.LayerParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='caffe.LayerParameter.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='caffe.LayerParameter.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bottom', full_name='caffe.LayerParameter.bottom', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='top', full_name='caffe.LayerParameter.top', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='phase', full_name='caffe.LayerParameter.phase', index=4,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='loss_weight', full_name='caffe.LayerParameter.loss_weight', index=5,
number=5, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='param', full_name='caffe.LayerParameter.param', index=6,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blobs', full_name='caffe.LayerParameter.blobs', index=7,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='propagate_down', full_name='caffe.LayerParameter.propagate_down', index=8,
number=11, type=8, cpp_type=7, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='include', full_name='caffe.LayerParameter.include', index=9,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='exclude', full_name='caffe.LayerParameter.exclude', index=10,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transform_param', full_name='caffe.LayerParameter.transform_param', index=11,
number=100, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='loss_param', full_name='caffe.LayerParameter.loss_param', index=12,
number=101, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='accuracy_param', full_name='caffe.LayerParameter.accuracy_param', index=13,
number=102, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='argmax_param', full_name='caffe.LayerParameter.argmax_param', index=14,
number=103, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='batch_norm_param', full_name='caffe.LayerParameter.batch_norm_param', index=15,
number=139, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bias_param', full_name='caffe.LayerParameter.bias_param', index=16,
number=141, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='concat_param', full_name='caffe.LayerParameter.concat_param', index=17,
number=104, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='contrastive_loss_param', full_name='caffe.LayerParameter.contrastive_loss_param', index=18,
number=105, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='convolution_param', full_name='caffe.LayerParameter.convolution_param', index=19,
number=106, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data_param', full_name='caffe.LayerParameter.data_param', index=20,
number=107, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_param', full_name='caffe.LayerParameter.dropout_param', index=21,
number=108, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dummy_data_param', full_name='caffe.LayerParameter.dummy_data_param', index=22,
number=109, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='eltwise_param', full_name='caffe.LayerParameter.eltwise_param', index=23,
number=110, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='elu_param', full_name='caffe.LayerParameter.elu_param', index=24,
number=140, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='embed_param', full_name='caffe.LayerParameter.embed_param', index=25,
number=137, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='exp_param', full_name='caffe.LayerParameter.exp_param', index=26,
number=111, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='flatten_param', full_name='caffe.LayerParameter.flatten_param', index=27,
number=135, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hdf5_data_param', full_name='caffe.LayerParameter.hdf5_data_param', index=28,
number=112, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hdf5_output_param', full_name='caffe.LayerParameter.hdf5_output_param', index=29,
number=113, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hinge_loss_param', full_name='caffe.LayerParameter.hinge_loss_param', index=30,
number=114, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_data_param', full_name='caffe.LayerParameter.image_data_param', index=31,
number=115, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='infogain_loss_param', full_name='caffe.LayerParameter.infogain_loss_param', index=32,
number=116, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inner_product_param', full_name='caffe.LayerParameter.inner_product_param', index=33,
number=117, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_param', full_name='caffe.LayerParameter.log_param', index=34,
number=134, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lrn_param', full_name='caffe.LayerParameter.lrn_param', index=35,
number=118, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='memory_data_param', full_name='caffe.LayerParameter.memory_data_param', index=36,
number=119, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mvn_param', full_name='caffe.LayerParameter.mvn_param', index=37,
number=120, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pooling_param', full_name='caffe.LayerParameter.pooling_param', index=38,
number=121, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='power_param', full_name='caffe.LayerParameter.power_param', index=39,
number=122, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='prelu_param', full_name='caffe.LayerParameter.prelu_param', index=40,
number=131, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='python_param', full_name='caffe.LayerParameter.python_param', index=41,
number=130, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reduction_param', full_name='caffe.LayerParameter.reduction_param', index=42,
number=136, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='relu_param', full_name='caffe.LayerParameter.relu_param', index=43,
number=123, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reshape_param', full_name='caffe.LayerParameter.reshape_param', index=44,
number=133, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scale_param', full_name='caffe.LayerParameter.scale_param', index=45,
number=142, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sigmoid_param', full_name='caffe.LayerParameter.sigmoid_param', index=46,
number=124, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='softmax_param', full_name='caffe.LayerParameter.softmax_param', index=47,
number=125, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='spp_param', full_name='caffe.LayerParameter.spp_param', index=48,
number=132, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='slice_param', full_name='caffe.LayerParameter.slice_param', index=49,
number=126, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tanh_param', full_name='caffe.LayerParameter.tanh_param', index=50,
number=127, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='threshold_param', full_name='caffe.LayerParameter.threshold_param', index=51,
number=128, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tile_param', full_name='caffe.LayerParameter.tile_param', index=52,
number=138, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='window_data_param', full_name='caffe.LayerParameter.window_data_param', index=53,
number=129, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2770,
serialized_end=5138,
)
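# LayerParameter is the modern Caffe layer definition: the core fields (name,
# type, bottom, top, phase, ...) use low field numbers, while the
# per-layer-type sub-message parameters (convolution_param, pooling_param,
# ...) occupy numbers 100 and up, keeping the type-specific parameters
# grouped apart from the core fields as new layer types are appended.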
_TRANSFORMATIONPARAMETER = _descriptor.Descriptor(
name='TransformationParameter',
full_name='caffe.TransformationParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.TransformationParameter.scale', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mirror', full_name='caffe.TransformationParameter.mirror', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_size', full_name='caffe.TransformationParameter.crop_size', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mean_file', full_name='caffe.TransformationParameter.mean_file', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mean_value', full_name='caffe.TransformationParameter.mean_value', index=4,
number=5, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='force_color', full_name='caffe.TransformationParameter.force_color', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='force_gray', full_name='caffe.TransformationParameter.force_gray', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5141,
serialized_end=5323,
)
_LOSSPARAMETER = _descriptor.Descriptor(
name='LossParameter',
full_name='caffe.LossParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='ignore_label', full_name='caffe.LossParameter.ignore_label', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='normalization', full_name='caffe.LossParameter.normalization', index=1,
number=3, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='normalize', full_name='caffe.LossParameter.normalize', index=2,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_LOSSPARAMETER_NORMALIZATIONMODE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5326,
serialized_end=5520,
)
_ACCURACYPARAMETER = _descriptor.Descriptor(
name='AccuracyParameter',
full_name='caffe.AccuracyParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='top_k', full_name='caffe.AccuracyParameter.top_k', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.AccuracyParameter.axis', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ignore_label', full_name='caffe.AccuracyParameter.ignore_label', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5522,
serialized_end=5598,
)
_ARGMAXPARAMETER = _descriptor.Descriptor(
name='ArgMaxParameter',
full_name='caffe.ArgMaxParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='out_max_val', full_name='caffe.ArgMaxParameter.out_max_val', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='top_k', full_name='caffe.ArgMaxParameter.top_k', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ArgMaxParameter.axis', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5600,
serialized_end=5677,
)
_CONCATPARAMETER = _descriptor.Descriptor(
name='ConcatParameter',
full_name='caffe.ConcatParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ConcatParameter.axis', index=0,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='concat_dim', full_name='caffe.ConcatParameter.concat_dim', index=1,
number=1, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5679,
serialized_end=5736,
)
_BATCHNORMPARAMETER = _descriptor.Descriptor(
name='BatchNormParameter',
full_name='caffe.BatchNormParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='use_global_stats', full_name='caffe.BatchNormParameter.use_global_stats', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='moving_average_fraction', full_name='caffe.BatchNormParameter.moving_average_fraction', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.999),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='eps', full_name='caffe.BatchNormParameter.eps', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1e-05),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5738,
serialized_end=5844,
)
_BIASPARAMETER = _descriptor.Descriptor(
name='BiasParameter',
full_name='caffe.BiasParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.BiasParameter.axis', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_axes', full_name='caffe.BiasParameter.num_axes', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='filler', full_name='caffe.BiasParameter.filler', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5846,
serialized_end=5939,
)
_CONTRASTIVELOSSPARAMETER = _descriptor.Descriptor(
name='ContrastiveLossParameter',
full_name='caffe.ContrastiveLossParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='margin', full_name='caffe.ContrastiveLossParameter.margin', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='legacy_version', full_name='caffe.ContrastiveLossParameter.legacy_version', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=5941,
serialized_end=6017,
)
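# caffe.ConvolutionParameter: the N-D spatial fields (pad, kernel_size,
# stride, dilation) are repeated uint32 fields (label=3), while pad_h/pad_w,
# kernel_h/kernel_w and stride_h/stride_w are the legacy scalar 2-D
# alternatives.  Its nested Engine enum is registered via enum_types below.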
_CONVOLUTIONPARAMETER = _descriptor.Descriptor(
name='ConvolutionParameter',
full_name='caffe.ConvolutionParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.ConvolutionParameter.num_output', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bias_term', full_name='caffe.ConvolutionParameter.bias_term', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pad', full_name='caffe.ConvolutionParameter.pad', index=2,
number=3, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='caffe.ConvolutionParameter.kernel_size', index=3,
number=4, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stride', full_name='caffe.ConvolutionParameter.stride', index=4,
number=6, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dilation', full_name='caffe.ConvolutionParameter.dilation', index=5,
number=18, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pad_h', full_name='caffe.ConvolutionParameter.pad_h', index=6,
number=9, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pad_w', full_name='caffe.ConvolutionParameter.pad_w', index=7,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_h', full_name='caffe.ConvolutionParameter.kernel_h', index=8,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_w', full_name='caffe.ConvolutionParameter.kernel_w', index=9,
number=12, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stride_h', full_name='caffe.ConvolutionParameter.stride_h', index=10,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stride_w', full_name='caffe.ConvolutionParameter.stride_w', index=11,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='group', full_name='caffe.ConvolutionParameter.group', index=12,
number=5, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_filler', full_name='caffe.ConvolutionParameter.weight_filler', index=13,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bias_filler', full_name='caffe.ConvolutionParameter.bias_filler', index=14,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.ConvolutionParameter.engine', index=15,
number=15, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ConvolutionParameter.axis', index=16,
number=16, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='force_nd_im2col', full_name='caffe.ConvolutionParameter.force_nd_im2col', index=17,
number=17, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_CONVOLUTIONPARAMETER_ENGINE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6020,
serialized_end=6528,
)
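# caffe.DataParameter: the 'backend' field is enum-typed (type=14) and refers
# to the nested DB enum (_DATAPARAMETER_DB); its default_value of 0 selects
# the first enum value.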
_DATAPARAMETER = _descriptor.Descriptor(
name='DataParameter',
full_name='caffe.DataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='caffe.DataParameter.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='batch_size', full_name='caffe.DataParameter.batch_size', index=1,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rand_skip', full_name='caffe.DataParameter.rand_skip', index=2,
number=7, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='backend', full_name='caffe.DataParameter.backend', index=3,
number=8, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.DataParameter.scale', index=4,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mean_file', full_name='caffe.DataParameter.mean_file', index=5,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_size', full_name='caffe.DataParameter.crop_size', index=6,
number=5, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mirror', full_name='caffe.DataParameter.mirror', index=7,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='force_encoded_color', full_name='caffe.DataParameter.force_encoded_color', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='prefetch', full_name='caffe.DataParameter.prefetch', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_DATAPARAMETER_DB,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6531,
serialized_end=6823,
)
_DROPOUTPARAMETER = _descriptor.Descriptor(
name='DropoutParameter',
full_name='caffe.DropoutParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dropout_ratio', full_name='caffe.DropoutParameter.dropout_ratio', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6825,
serialized_end=6871,
)
_DUMMYDATAPARAMETER = _descriptor.Descriptor(
name='DummyDataParameter',
full_name='caffe.DummyDataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data_filler', full_name='caffe.DummyDataParameter.data_filler', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shape', full_name='caffe.DummyDataParameter.shape', index=1,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num', full_name='caffe.DummyDataParameter.num', index=2,
number=2, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='channels', full_name='caffe.DummyDataParameter.channels', index=3,
number=3, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='height', full_name='caffe.DummyDataParameter.height', index=4,
number=4, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='width', full_name='caffe.DummyDataParameter.width', index=5,
number=5, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=6874,
serialized_end=7034,
)
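# caffe.EltwiseParameter: 'operation' is enum-typed with numeric default 1,
# 'coeff' is a repeated float, and 'stable_prod_grad' defaults to True.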
_ELTWISEPARAMETER = _descriptor.Descriptor(
name='EltwiseParameter',
full_name='caffe.EltwiseParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation', full_name='caffe.EltwiseParameter.operation', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='coeff', full_name='caffe.EltwiseParameter.coeff', index=1,
number=2, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stable_prod_grad', full_name='caffe.EltwiseParameter.stable_prod_grad', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ELTWISEPARAMETER_ELTWISEOP,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7037,
serialized_end=7202,
)
_ELUPARAMETER = _descriptor.Descriptor(
name='ELUParameter',
full_name='caffe.ELUParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='alpha', full_name='caffe.ELUParameter.alpha', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7204,
serialized_end=7236,
)
_EMBEDPARAMETER = _descriptor.Descriptor(
name='EmbedParameter',
full_name='caffe.EmbedParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.EmbedParameter.num_output', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='input_dim', full_name='caffe.EmbedParameter.input_dim', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bias_term', full_name='caffe.EmbedParameter.bias_term', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_filler', full_name='caffe.EmbedParameter.weight_filler', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bias_filler', full_name='caffe.EmbedParameter.bias_filler', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7239,
serialized_end=7411,
)
_EXPPARAMETER = _descriptor.Descriptor(
name='ExpParameter',
full_name='caffe.ExpParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base', full_name='caffe.ExpParameter.base', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(-1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.ExpParameter.scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shift', full_name='caffe.ExpParameter.shift', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7413,
serialized_end=7481,
)
_FLATTENPARAMETER = _descriptor.Descriptor(
name='FlattenParameter',
full_name='caffe.FlattenParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.FlattenParameter.axis', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='end_axis', full_name='caffe.FlattenParameter.end_axis', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7483,
serialized_end=7540,
)
_HDF5DATAPARAMETER = _descriptor.Descriptor(
name='HDF5DataParameter',
full_name='caffe.HDF5DataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='caffe.HDF5DataParameter.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='batch_size', full_name='caffe.HDF5DataParameter.batch_size', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shuffle', full_name='caffe.HDF5DataParameter.shuffle', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7542,
serialized_end=7621,
)
_HDF5OUTPUTPARAMETER = _descriptor.Descriptor(
name='HDF5OutputParameter',
full_name='caffe.HDF5OutputParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_name', full_name='caffe.HDF5OutputParameter.file_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7623,
serialized_end=7663,
)
_HINGELOSSPARAMETER = _descriptor.Descriptor(
name='HingeLossParameter',
full_name='caffe.HingeLossParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='norm', full_name='caffe.HingeLossParameter.norm', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_HINGELOSSPARAMETER_NORM,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7665,
serialized_end=7759,
)
_IMAGEDATAPARAMETER = _descriptor.Descriptor(
name='ImageDataParameter',
full_name='caffe.ImageDataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='caffe.ImageDataParameter.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='batch_size', full_name='caffe.ImageDataParameter.batch_size', index=1,
number=4, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rand_skip', full_name='caffe.ImageDataParameter.rand_skip', index=2,
number=7, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shuffle', full_name='caffe.ImageDataParameter.shuffle', index=3,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_height', full_name='caffe.ImageDataParameter.new_height', index=4,
number=9, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_width', full_name='caffe.ImageDataParameter.new_width', index=5,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_color', full_name='caffe.ImageDataParameter.is_color', index=6,
number=11, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.ImageDataParameter.scale', index=7,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mean_file', full_name='caffe.ImageDataParameter.mean_file', index=8,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_size', full_name='caffe.ImageDataParameter.crop_size', index=9,
number=5, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mirror', full_name='caffe.ImageDataParameter.mirror', index=10,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='root_folder', full_name='caffe.ImageDataParameter.root_folder', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=7762,
serialized_end=8041,
)
_INFOGAINLOSSPARAMETER = _descriptor.Descriptor(
name='InfogainLossParameter',
full_name='caffe.InfogainLossParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='caffe.InfogainLossParameter.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8043,
serialized_end=8082,
)
_INNERPRODUCTPARAMETER = _descriptor.Descriptor(
name='InnerProductParameter',
full_name='caffe.InnerProductParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.InnerProductParameter.num_output', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bias_term', full_name='caffe.InnerProductParameter.bias_term', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_filler', full_name='caffe.InnerProductParameter.weight_filler', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bias_filler', full_name='caffe.InnerProductParameter.bias_filler', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.InnerProductParameter.axis', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8085,
serialized_end=8262,
)
_LOGPARAMETER = _descriptor.Descriptor(
name='LogParameter',
full_name='caffe.LogParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base', full_name='caffe.LogParameter.base', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(-1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.LogParameter.scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shift', full_name='caffe.LogParameter.shift', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8264,
serialized_end=8332,
)
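# caffe.LRNParameter: carries two nested enums (NormRegion and Engine), both
# listed in enum_types below; local_size defaults to 5 and beta to 0.75.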
_LRNPARAMETER = _descriptor.Descriptor(
name='LRNParameter',
full_name='caffe.LRNParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='local_size', full_name='caffe.LRNParameter.local_size', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='alpha', full_name='caffe.LRNParameter.alpha', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='beta', full_name='caffe.LRNParameter.beta', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.75),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='norm_region', full_name='caffe.LRNParameter.norm_region', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='k', full_name='caffe.LRNParameter.k', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.LRNParameter.engine', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_LRNPARAMETER_NORMREGION,
_LRNPARAMETER_ENGINE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8335,
serialized_end=8647,
)
_MEMORYDATAPARAMETER = _descriptor.Descriptor(
name='MemoryDataParameter',
full_name='caffe.MemoryDataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='batch_size', full_name='caffe.MemoryDataParameter.batch_size', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='channels', full_name='caffe.MemoryDataParameter.channels', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='height', full_name='caffe.MemoryDataParameter.height', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='width', full_name='caffe.MemoryDataParameter.width', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8649,
serialized_end=8739,
)
_MVNPARAMETER = _descriptor.Descriptor(
name='MVNParameter',
full_name='caffe.MVNParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='normalize_variance', full_name='caffe.MVNParameter.normalize_variance', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='across_channels', full_name='caffe.MVNParameter.across_channels', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='eps', full_name='caffe.MVNParameter.eps', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1e-09),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8741,
serialized_end=8841,
)
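# caffe.PoolingParameter: unlike ConvolutionParameter, the spatial fields here
# (pad, kernel_size, stride) are scalar rather than repeated; stride defaults
# to 1 and pad to 0.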
_POOLINGPARAMETER = _descriptor.Descriptor(
name='PoolingParameter',
full_name='caffe.PoolingParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pool', full_name='caffe.PoolingParameter.pool', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pad', full_name='caffe.PoolingParameter.pad', index=1,
number=4, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pad_h', full_name='caffe.PoolingParameter.pad_h', index=2,
number=9, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pad_w', full_name='caffe.PoolingParameter.pad_w', index=3,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='caffe.PoolingParameter.kernel_size', index=4,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_h', full_name='caffe.PoolingParameter.kernel_h', index=5,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_w', full_name='caffe.PoolingParameter.kernel_w', index=6,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stride', full_name='caffe.PoolingParameter.stride', index=7,
number=3, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stride_h', full_name='caffe.PoolingParameter.stride_h', index=8,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stride_w', full_name='caffe.PoolingParameter.stride_w', index=9,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.PoolingParameter.engine', index=10,
number=11, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='global_pooling', full_name='caffe.PoolingParameter.global_pooling', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_POOLINGPARAMETER_POOLMETHOD,
_POOLINGPARAMETER_ENGINE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=8844,
serialized_end=9262,
)
_POWERPARAMETER = _descriptor.Descriptor(
name='PowerParameter',
full_name='caffe.PowerParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='power', full_name='caffe.PowerParameter.power', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.PowerParameter.scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shift', full_name='caffe.PowerParameter.shift', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9264,
serialized_end=9334,
)
_PYTHONPARAMETER = _descriptor.Descriptor(
name='PythonParameter',
full_name='caffe.PythonParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='module', full_name='caffe.PythonParameter.module', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='layer', full_name='caffe.PythonParameter.layer', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='param_str', full_name='caffe.PythonParameter.param_str', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='share_in_parallel', full_name='caffe.PythonParameter.share_in_parallel', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9336,
serialized_end=9439,
)
_REDUCTIONPARAMETER = _descriptor.Descriptor(
name='ReductionParameter',
full_name='caffe.ReductionParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='operation', full_name='caffe.ReductionParameter.operation', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ReductionParameter.axis', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='coeff', full_name='caffe.ReductionParameter.coeff', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_REDUCTIONPARAMETER_REDUCTIONOP,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9442,
serialized_end=9615,
)
_RELUPARAMETER = _descriptor.Descriptor(
name='ReLUParameter',
full_name='caffe.ReLUParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='negative_slope', full_name='caffe.ReLUParameter.negative_slope', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.ReLUParameter.engine', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_RELUPARAMETER_ENGINE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9618,
serialized_end=9759,
)
_RESHAPEPARAMETER = _descriptor.Descriptor(
name='ReshapeParameter',
full_name='caffe.ReshapeParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shape', full_name='caffe.ReshapeParameter.shape', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ReshapeParameter.axis', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_axes', full_name='caffe.ReshapeParameter.num_axes', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9761,
serialized_end=9851,
)
_SCALEPARAMETER = _descriptor.Descriptor(
name='ScaleParameter',
full_name='caffe.ScaleParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.ScaleParameter.axis', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_axes', full_name='caffe.ScaleParameter.num_axes', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='filler', full_name='caffe.ScaleParameter.filler', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bias_term', full_name='caffe.ScaleParameter.bias_term', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bias_filler', full_name='caffe.ScaleParameter.bias_filler', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=9854,
serialized_end=10019,
)
_SIGMOIDPARAMETER = _descriptor.Descriptor(
name='SigmoidParameter',
full_name='caffe.SigmoidParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.SigmoidParameter.engine', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SIGMOIDPARAMETER_ENGINE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10021,
serialized_end=10141,
)
_SLICEPARAMETER = _descriptor.Descriptor(
name='SliceParameter',
full_name='caffe.SliceParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.SliceParameter.axis', index=0,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='slice_point', full_name='caffe.SliceParameter.slice_point', index=1,
number=2, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='slice_dim', full_name='caffe.SliceParameter.slice_dim', index=2,
number=1, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10143,
serialized_end=10219,
)
_SOFTMAXPARAMETER = _descriptor.Descriptor(
name='SoftmaxParameter',
full_name='caffe.SoftmaxParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.SoftmaxParameter.engine', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.SoftmaxParameter.axis', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SOFTMAXPARAMETER_ENGINE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10222,
serialized_end=10359,
)
_TANHPARAMETER = _descriptor.Descriptor(
name='TanHParameter',
full_name='caffe.TanHParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.TanHParameter.engine', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_TANHPARAMETER_ENGINE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10361,
serialized_end=10475,
)
_TILEPARAMETER = _descriptor.Descriptor(
name='TileParameter',
full_name='caffe.TileParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='axis', full_name='caffe.TileParameter.axis', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tiles', full_name='caffe.TileParameter.tiles', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10477,
serialized_end=10524,
)
_THRESHOLDPARAMETER = _descriptor.Descriptor(
name='ThresholdParameter',
full_name='caffe.ThresholdParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='threshold', full_name='caffe.ThresholdParameter.threshold', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10526,
serialized_end=10568,
)
_WINDOWDATAPARAMETER = _descriptor.Descriptor(
name='WindowDataParameter',
full_name='caffe.WindowDataParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='caffe.WindowDataParameter.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.WindowDataParameter.scale', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mean_file', full_name='caffe.WindowDataParameter.mean_file', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='batch_size', full_name='caffe.WindowDataParameter.batch_size', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_size', full_name='caffe.WindowDataParameter.crop_size', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mirror', full_name='caffe.WindowDataParameter.mirror', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fg_threshold', full_name='caffe.WindowDataParameter.fg_threshold', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bg_threshold', full_name='caffe.WindowDataParameter.bg_threshold', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fg_fraction', full_name='caffe.WindowDataParameter.fg_fraction', index=8,
number=9, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.25),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='context_pad', full_name='caffe.WindowDataParameter.context_pad', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_mode', full_name='caffe.WindowDataParameter.crop_mode', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("warp").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cache_images', full_name='caffe.WindowDataParameter.cache_images', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='root_folder', full_name='caffe.WindowDataParameter.root_folder', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10571,
serialized_end=10892,
)
_SPPPARAMETER = _descriptor.Descriptor(
name='SPPParameter',
full_name='caffe.SPPParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pyramid_height', full_name='caffe.SPPParameter.pyramid_height', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pool', full_name='caffe.SPPParameter.pool', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='engine', full_name='caffe.SPPParameter.engine', index=2,
number=6, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SPPPARAMETER_POOLMETHOD,
_SPPPARAMETER_ENGINE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=10895,
serialized_end=11130,
)
_V1LAYERPARAMETER = _descriptor.Descriptor(
name='V1LayerParameter',
full_name='caffe.V1LayerParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bottom', full_name='caffe.V1LayerParameter.bottom', index=0,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='top', full_name='caffe.V1LayerParameter.top', index=1,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='caffe.V1LayerParameter.name', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='include', full_name='caffe.V1LayerParameter.include', index=3,
number=32, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='exclude', full_name='caffe.V1LayerParameter.exclude', index=4,
number=33, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='caffe.V1LayerParameter.type', index=5,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blobs', full_name='caffe.V1LayerParameter.blobs', index=6,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='param', full_name='caffe.V1LayerParameter.param', index=7,
number=1001, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blob_share_mode', full_name='caffe.V1LayerParameter.blob_share_mode', index=8,
number=1002, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blobs_lr', full_name='caffe.V1LayerParameter.blobs_lr', index=9,
number=7, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_decay', full_name='caffe.V1LayerParameter.weight_decay', index=10,
number=8, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='loss_weight', full_name='caffe.V1LayerParameter.loss_weight', index=11,
number=35, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='accuracy_param', full_name='caffe.V1LayerParameter.accuracy_param', index=12,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='argmax_param', full_name='caffe.V1LayerParameter.argmax_param', index=13,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='concat_param', full_name='caffe.V1LayerParameter.concat_param', index=14,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='contrastive_loss_param', full_name='caffe.V1LayerParameter.contrastive_loss_param', index=15,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='convolution_param', full_name='caffe.V1LayerParameter.convolution_param', index=16,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data_param', full_name='caffe.V1LayerParameter.data_param', index=17,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_param', full_name='caffe.V1LayerParameter.dropout_param', index=18,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dummy_data_param', full_name='caffe.V1LayerParameter.dummy_data_param', index=19,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='eltwise_param', full_name='caffe.V1LayerParameter.eltwise_param', index=20,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='exp_param', full_name='caffe.V1LayerParameter.exp_param', index=21,
number=41, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hdf5_data_param', full_name='caffe.V1LayerParameter.hdf5_data_param', index=22,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hdf5_output_param', full_name='caffe.V1LayerParameter.hdf5_output_param', index=23,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hinge_loss_param', full_name='caffe.V1LayerParameter.hinge_loss_param', index=24,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_data_param', full_name='caffe.V1LayerParameter.image_data_param', index=25,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='infogain_loss_param', full_name='caffe.V1LayerParameter.infogain_loss_param', index=26,
number=16, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inner_product_param', full_name='caffe.V1LayerParameter.inner_product_param', index=27,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='lrn_param', full_name='caffe.V1LayerParameter.lrn_param', index=28,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='memory_data_param', full_name='caffe.V1LayerParameter.memory_data_param', index=29,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mvn_param', full_name='caffe.V1LayerParameter.mvn_param', index=30,
number=34, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pooling_param', full_name='caffe.V1LayerParameter.pooling_param', index=31,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='power_param', full_name='caffe.V1LayerParameter.power_param', index=32,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='relu_param', full_name='caffe.V1LayerParameter.relu_param', index=33,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sigmoid_param', full_name='caffe.V1LayerParameter.sigmoid_param', index=34,
number=38, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='softmax_param', full_name='caffe.V1LayerParameter.softmax_param', index=35,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='slice_param', full_name='caffe.V1LayerParameter.slice_param', index=36,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tanh_param', full_name='caffe.V1LayerParameter.tanh_param', index=37,
number=37, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='threshold_param', full_name='caffe.V1LayerParameter.threshold_param', index=38,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='window_data_param', full_name='caffe.V1LayerParameter.window_data_param', index=39,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transform_param', full_name='caffe.V1LayerParameter.transform_param', index=40,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='loss_param', full_name='caffe.V1LayerParameter.loss_param', index=41,
number=42, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='layer', full_name='caffe.V1LayerParameter.layer', index=42,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_V1LAYERPARAMETER_LAYERTYPE,
_V1LAYERPARAMETER_DIMCHECKMODE,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=11133,
serialized_end=13661,
)
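# V1LayerParameter (above) and V0LayerParameter (below) are deprecated layer
# formats from older caffe releases, kept so that previously serialized
# models remain parseable.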
_V0LAYERPARAMETER = _descriptor.Descriptor(
name='V0LayerParameter',
full_name='caffe.V0LayerParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='caffe.V0LayerParameter.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='caffe.V0LayerParameter.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_output', full_name='caffe.V0LayerParameter.num_output', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='biasterm', full_name='caffe.V0LayerParameter.biasterm', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_filler', full_name='caffe.V0LayerParameter.weight_filler', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bias_filler', full_name='caffe.V0LayerParameter.bias_filler', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pad', full_name='caffe.V0LayerParameter.pad', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernelsize', full_name='caffe.V0LayerParameter.kernelsize', index=7,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='group', full_name='caffe.V0LayerParameter.group', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stride', full_name='caffe.V0LayerParameter.stride', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pool', full_name='caffe.V0LayerParameter.pool', index=10,
number=11, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_ratio', full_name='caffe.V0LayerParameter.dropout_ratio', index=11,
number=12, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='local_size', full_name='caffe.V0LayerParameter.local_size', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=5,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='alpha', full_name='caffe.V0LayerParameter.alpha', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='beta', full_name='caffe.V0LayerParameter.beta', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.75),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='k', full_name='caffe.V0LayerParameter.k', index=15,
number=22, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source', full_name='caffe.V0LayerParameter.source', index=16,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scale', full_name='caffe.V0LayerParameter.scale', index=17,
number=17, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='meanfile', full_name='caffe.V0LayerParameter.meanfile', index=18,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='batchsize', full_name='caffe.V0LayerParameter.batchsize', index=19,
number=19, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cropsize', full_name='caffe.V0LayerParameter.cropsize', index=20,
number=20, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mirror', full_name='caffe.V0LayerParameter.mirror', index=21,
number=21, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blobs', full_name='caffe.V0LayerParameter.blobs', index=22,
number=50, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blobs_lr', full_name='caffe.V0LayerParameter.blobs_lr', index=23,
number=51, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_decay', full_name='caffe.V0LayerParameter.weight_decay', index=24,
number=52, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rand_skip', full_name='caffe.V0LayerParameter.rand_skip', index=25,
number=53, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='det_fg_threshold', full_name='caffe.V0LayerParameter.det_fg_threshold', index=26,
number=54, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='det_bg_threshold', full_name='caffe.V0LayerParameter.det_bg_threshold', index=27,
number=55, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='det_fg_fraction', full_name='caffe.V0LayerParameter.det_fg_fraction', index=28,
number=56, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.25),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='det_context_pad', full_name='caffe.V0LayerParameter.det_context_pad', index=29,
number=58, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='det_crop_mode', full_name='caffe.V0LayerParameter.det_crop_mode', index=30,
number=59, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("warp").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_num', full_name='caffe.V0LayerParameter.new_num', index=31,
number=60, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_channels', full_name='caffe.V0LayerParameter.new_channels', index=32,
number=61, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_height', full_name='caffe.V0LayerParameter.new_height', index=33,
number=62, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_width', full_name='caffe.V0LayerParameter.new_width', index=34,
number=63, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shuffle_images', full_name='caffe.V0LayerParameter.shuffle_images', index=35,
number=64, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='concat_dim', full_name='caffe.V0LayerParameter.concat_dim', index=36,
number=65, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hdf5_output_param', full_name='caffe.V0LayerParameter.hdf5_output_param', index=37,
number=1001, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_V0LAYERPARAMETER_POOLMETHOD,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=13664,
serialized_end=14685,
)
_PRELUPARAMETER = _descriptor.Descriptor(
name='PReLUParameter',
full_name='caffe.PReLUParameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='filler', full_name='caffe.PReLUParameter.filler', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='channel_shared', full_name='caffe.PReLUParameter.channel_shared', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=14687,
serialized_end=14774,
)
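# The descriptors above were built with message_type/enum_type set to None
# because the referenced descriptors did not exist yet; the assignments
# below wire up those cross-references now that all of them are defined.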
_BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE
_BLOBPROTOVECTOR.fields_by_name['blobs'].message_type = _BLOBPROTO
_FILLERPARAMETER.fields_by_name['variance_norm'].enum_type = _FILLERPARAMETER_VARIANCENORM
_FILLERPARAMETER_VARIANCENORM.containing_type = _FILLERPARAMETER
_NETPARAMETER.fields_by_name['input_shape'].message_type = _BLOBSHAPE
_NETPARAMETER.fields_by_name['state'].message_type = _NETSTATE
_NETPARAMETER.fields_by_name['layer'].message_type = _LAYERPARAMETER
_NETPARAMETER.fields_by_name['layers'].message_type = _V1LAYERPARAMETER
_SOLVERPARAMETER.fields_by_name['net_param'].message_type = _NETPARAMETER
_SOLVERPARAMETER.fields_by_name['train_net_param'].message_type = _NETPARAMETER
_SOLVERPARAMETER.fields_by_name['test_net_param'].message_type = _NETPARAMETER
_SOLVERPARAMETER.fields_by_name['train_state'].message_type = _NETSTATE
_SOLVERPARAMETER.fields_by_name['test_state'].message_type = _NETSTATE
_SOLVERPARAMETER.fields_by_name['snapshot_format'].enum_type = _SOLVERPARAMETER_SNAPSHOTFORMAT
_SOLVERPARAMETER.fields_by_name['solver_mode'].enum_type = _SOLVERPARAMETER_SOLVERMODE
_SOLVERPARAMETER.fields_by_name['solver_type'].enum_type = _SOLVERPARAMETER_SOLVERTYPE
_SOLVERPARAMETER_SNAPSHOTFORMAT.containing_type = _SOLVERPARAMETER
_SOLVERPARAMETER_SOLVERMODE.containing_type = _SOLVERPARAMETER
_SOLVERPARAMETER_SOLVERTYPE.containing_type = _SOLVERPARAMETER
_SOLVERSTATE.fields_by_name['history'].message_type = _BLOBPROTO
_NETSTATE.fields_by_name['phase'].enum_type = _PHASE
_NETSTATERULE.fields_by_name['phase'].enum_type = _PHASE
_PARAMSPEC.fields_by_name['share_mode'].enum_type = _PARAMSPEC_DIMCHECKMODE
_PARAMSPEC_DIMCHECKMODE.containing_type = _PARAMSPEC
_LAYERPARAMETER.fields_by_name['phase'].enum_type = _PHASE
_LAYERPARAMETER.fields_by_name['param'].message_type = _PARAMSPEC
_LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO
_LAYERPARAMETER.fields_by_name['include'].message_type = _NETSTATERULE
_LAYERPARAMETER.fields_by_name['exclude'].message_type = _NETSTATERULE
_LAYERPARAMETER.fields_by_name['transform_param'].message_type = _TRANSFORMATIONPARAMETER
_LAYERPARAMETER.fields_by_name['loss_param'].message_type = _LOSSPARAMETER
_LAYERPARAMETER.fields_by_name['accuracy_param'].message_type = _ACCURACYPARAMETER
_LAYERPARAMETER.fields_by_name['argmax_param'].message_type = _ARGMAXPARAMETER
_LAYERPARAMETER.fields_by_name['batch_norm_param'].message_type = _BATCHNORMPARAMETER
_LAYERPARAMETER.fields_by_name['bias_param'].message_type = _BIASPARAMETER
_LAYERPARAMETER.fields_by_name['concat_param'].message_type = _CONCATPARAMETER
_LAYERPARAMETER.fields_by_name['contrastive_loss_param'].message_type = _CONTRASTIVELOSSPARAMETER
_LAYERPARAMETER.fields_by_name['convolution_param'].message_type = _CONVOLUTIONPARAMETER
_LAYERPARAMETER.fields_by_name['data_param'].message_type = _DATAPARAMETER
_LAYERPARAMETER.fields_by_name['dropout_param'].message_type = _DROPOUTPARAMETER
_LAYERPARAMETER.fields_by_name['dummy_data_param'].message_type = _DUMMYDATAPARAMETER
_LAYERPARAMETER.fields_by_name['eltwise_param'].message_type = _ELTWISEPARAMETER
_LAYERPARAMETER.fields_by_name['elu_param'].message_type = _ELUPARAMETER
_LAYERPARAMETER.fields_by_name['embed_param'].message_type = _EMBEDPARAMETER
_LAYERPARAMETER.fields_by_name['exp_param'].message_type = _EXPPARAMETER
_LAYERPARAMETER.fields_by_name['flatten_param'].message_type = _FLATTENPARAMETER
_LAYERPARAMETER.fields_by_name['hdf5_data_param'].message_type = _HDF5DATAPARAMETER
_LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER
_LAYERPARAMETER.fields_by_name['hinge_loss_param'].message_type = _HINGELOSSPARAMETER
_LAYERPARAMETER.fields_by_name['image_data_param'].message_type = _IMAGEDATAPARAMETER
_LAYERPARAMETER.fields_by_name['infogain_loss_param'].message_type = _INFOGAINLOSSPARAMETER
_LAYERPARAMETER.fields_by_name['inner_product_param'].message_type = _INNERPRODUCTPARAMETER
_LAYERPARAMETER.fields_by_name['log_param'].message_type = _LOGPARAMETER
_LAYERPARAMETER.fields_by_name['lrn_param'].message_type = _LRNPARAMETER
_LAYERPARAMETER.fields_by_name['memory_data_param'].message_type = _MEMORYDATAPARAMETER
_LAYERPARAMETER.fields_by_name['mvn_param'].message_type = _MVNPARAMETER
_LAYERPARAMETER.fields_by_name['pooling_param'].message_type = _POOLINGPARAMETER
_LAYERPARAMETER.fields_by_name['power_param'].message_type = _POWERPARAMETER
_LAYERPARAMETER.fields_by_name['prelu_param'].message_type = _PRELUPARAMETER
_LAYERPARAMETER.fields_by_name['python_param'].message_type = _PYTHONPARAMETER
_LAYERPARAMETER.fields_by_name['reduction_param'].message_type = _REDUCTIONPARAMETER
_LAYERPARAMETER.fields_by_name['relu_param'].message_type = _RELUPARAMETER
_LAYERPARAMETER.fields_by_name['reshape_param'].message_type = _RESHAPEPARAMETER
_LAYERPARAMETER.fields_by_name['scale_param'].message_type = _SCALEPARAMETER
_LAYERPARAMETER.fields_by_name['sigmoid_param'].message_type = _SIGMOIDPARAMETER
_LAYERPARAMETER.fields_by_name['softmax_param'].message_type = _SOFTMAXPARAMETER
_LAYERPARAMETER.fields_by_name['spp_param'].message_type = _SPPPARAMETER
_LAYERPARAMETER.fields_by_name['slice_param'].message_type = _SLICEPARAMETER
_LAYERPARAMETER.fields_by_name['tanh_param'].message_type = _TANHPARAMETER
_LAYERPARAMETER.fields_by_name['threshold_param'].message_type = _THRESHOLDPARAMETER
_LAYERPARAMETER.fields_by_name['tile_param'].message_type = _TILEPARAMETER
_LAYERPARAMETER.fields_by_name['window_data_param'].message_type = _WINDOWDATAPARAMETER
_LOSSPARAMETER.fields_by_name['normalization'].enum_type = _LOSSPARAMETER_NORMALIZATIONMODE
_LOSSPARAMETER_NORMALIZATIONMODE.containing_type = _LOSSPARAMETER
_BIASPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_CONVOLUTIONPARAMETER.fields_by_name['engine'].enum_type = _CONVOLUTIONPARAMETER_ENGINE
_CONVOLUTIONPARAMETER_ENGINE.containing_type = _CONVOLUTIONPARAMETER
_DATAPARAMETER.fields_by_name['backend'].enum_type = _DATAPARAMETER_DB
_DATAPARAMETER_DB.containing_type = _DATAPARAMETER
_DUMMYDATAPARAMETER.fields_by_name['data_filler'].message_type = _FILLERPARAMETER
_DUMMYDATAPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE
_ELTWISEPARAMETER.fields_by_name['operation'].enum_type = _ELTWISEPARAMETER_ELTWISEOP
_ELTWISEPARAMETER_ELTWISEOP.containing_type = _ELTWISEPARAMETER
_EMBEDPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_EMBEDPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_HINGELOSSPARAMETER.fields_by_name['norm'].enum_type = _HINGELOSSPARAMETER_NORM
_HINGELOSSPARAMETER_NORM.containing_type = _HINGELOSSPARAMETER
_INNERPRODUCTPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_INNERPRODUCTPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_LRNPARAMETER.fields_by_name['norm_region'].enum_type = _LRNPARAMETER_NORMREGION
_LRNPARAMETER.fields_by_name['engine'].enum_type = _LRNPARAMETER_ENGINE
_LRNPARAMETER_NORMREGION.containing_type = _LRNPARAMETER
_LRNPARAMETER_ENGINE.containing_type = _LRNPARAMETER
_POOLINGPARAMETER.fields_by_name['pool'].enum_type = _POOLINGPARAMETER_POOLMETHOD
_POOLINGPARAMETER.fields_by_name['engine'].enum_type = _POOLINGPARAMETER_ENGINE
_POOLINGPARAMETER_POOLMETHOD.containing_type = _POOLINGPARAMETER
_POOLINGPARAMETER_ENGINE.containing_type = _POOLINGPARAMETER
_REDUCTIONPARAMETER.fields_by_name['operation'].enum_type = _REDUCTIONPARAMETER_REDUCTIONOP
_REDUCTIONPARAMETER_REDUCTIONOP.containing_type = _REDUCTIONPARAMETER
_RELUPARAMETER.fields_by_name['engine'].enum_type = _RELUPARAMETER_ENGINE
_RELUPARAMETER_ENGINE.containing_type = _RELUPARAMETER
_RESHAPEPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE
_SCALEPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER
_SCALEPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_SIGMOIDPARAMETER.fields_by_name['engine'].enum_type = _SIGMOIDPARAMETER_ENGINE
_SIGMOIDPARAMETER_ENGINE.containing_type = _SIGMOIDPARAMETER
_SOFTMAXPARAMETER.fields_by_name['engine'].enum_type = _SOFTMAXPARAMETER_ENGINE
_SOFTMAXPARAMETER_ENGINE.containing_type = _SOFTMAXPARAMETER
_TANHPARAMETER.fields_by_name['engine'].enum_type = _TANHPARAMETER_ENGINE
_TANHPARAMETER_ENGINE.containing_type = _TANHPARAMETER
_SPPPARAMETER.fields_by_name['pool'].enum_type = _SPPPARAMETER_POOLMETHOD
_SPPPARAMETER.fields_by_name['engine'].enum_type = _SPPPARAMETER_ENGINE
_SPPPARAMETER_POOLMETHOD.containing_type = _SPPPARAMETER
_SPPPARAMETER_ENGINE.containing_type = _SPPPARAMETER
_V1LAYERPARAMETER.fields_by_name['include'].message_type = _NETSTATERULE
_V1LAYERPARAMETER.fields_by_name['exclude'].message_type = _NETSTATERULE
_V1LAYERPARAMETER.fields_by_name['type'].enum_type = _V1LAYERPARAMETER_LAYERTYPE
_V1LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO
_V1LAYERPARAMETER.fields_by_name['blob_share_mode'].enum_type = _V1LAYERPARAMETER_DIMCHECKMODE
_V1LAYERPARAMETER.fields_by_name['accuracy_param'].message_type = _ACCURACYPARAMETER
_V1LAYERPARAMETER.fields_by_name['argmax_param'].message_type = _ARGMAXPARAMETER
_V1LAYERPARAMETER.fields_by_name['concat_param'].message_type = _CONCATPARAMETER
_V1LAYERPARAMETER.fields_by_name['contrastive_loss_param'].message_type = _CONTRASTIVELOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['convolution_param'].message_type = _CONVOLUTIONPARAMETER
_V1LAYERPARAMETER.fields_by_name['data_param'].message_type = _DATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['dropout_param'].message_type = _DROPOUTPARAMETER
_V1LAYERPARAMETER.fields_by_name['dummy_data_param'].message_type = _DUMMYDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['eltwise_param'].message_type = _ELTWISEPARAMETER
_V1LAYERPARAMETER.fields_by_name['exp_param'].message_type = _EXPPARAMETER
_V1LAYERPARAMETER.fields_by_name['hdf5_data_param'].message_type = _HDF5DATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER
_V1LAYERPARAMETER.fields_by_name['hinge_loss_param'].message_type = _HINGELOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['image_data_param'].message_type = _IMAGEDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['infogain_loss_param'].message_type = _INFOGAINLOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['inner_product_param'].message_type = _INNERPRODUCTPARAMETER
_V1LAYERPARAMETER.fields_by_name['lrn_param'].message_type = _LRNPARAMETER
_V1LAYERPARAMETER.fields_by_name['memory_data_param'].message_type = _MEMORYDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['mvn_param'].message_type = _MVNPARAMETER
_V1LAYERPARAMETER.fields_by_name['pooling_param'].message_type = _POOLINGPARAMETER
_V1LAYERPARAMETER.fields_by_name['power_param'].message_type = _POWERPARAMETER
_V1LAYERPARAMETER.fields_by_name['relu_param'].message_type = _RELUPARAMETER
_V1LAYERPARAMETER.fields_by_name['sigmoid_param'].message_type = _SIGMOIDPARAMETER
_V1LAYERPARAMETER.fields_by_name['softmax_param'].message_type = _SOFTMAXPARAMETER
_V1LAYERPARAMETER.fields_by_name['slice_param'].message_type = _SLICEPARAMETER
_V1LAYERPARAMETER.fields_by_name['tanh_param'].message_type = _TANHPARAMETER
_V1LAYERPARAMETER.fields_by_name['threshold_param'].message_type = _THRESHOLDPARAMETER
_V1LAYERPARAMETER.fields_by_name['window_data_param'].message_type = _WINDOWDATAPARAMETER
_V1LAYERPARAMETER.fields_by_name['transform_param'].message_type = _TRANSFORMATIONPARAMETER
_V1LAYERPARAMETER.fields_by_name['loss_param'].message_type = _LOSSPARAMETER
_V1LAYERPARAMETER.fields_by_name['layer'].message_type = _V0LAYERPARAMETER
_V1LAYERPARAMETER_LAYERTYPE.containing_type = _V1LAYERPARAMETER
_V1LAYERPARAMETER_DIMCHECKMODE.containing_type = _V1LAYERPARAMETER
_V0LAYERPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER
_V0LAYERPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER
_V0LAYERPARAMETER.fields_by_name['pool'].enum_type = _V0LAYERPARAMETER_POOLMETHOD
_V0LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO
_V0LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER
_V0LAYERPARAMETER_POOLMETHOD.containing_type = _V0LAYERPARAMETER
_PRELUPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER
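# Register every top-level message type (and the Phase enum) on the file
# descriptor so they can be resolved by name, e.g.
# DESCRIPTOR.message_types_by_name['NetParameter'].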
DESCRIPTOR.message_types_by_name['BlobShape'] = _BLOBSHAPE
DESCRIPTOR.message_types_by_name['BlobProto'] = _BLOBPROTO
DESCRIPTOR.message_types_by_name['BlobProtoVector'] = _BLOBPROTOVECTOR
DESCRIPTOR.message_types_by_name['Datum'] = _DATUM
DESCRIPTOR.message_types_by_name['FillerParameter'] = _FILLERPARAMETER
DESCRIPTOR.message_types_by_name['NetParameter'] = _NETPARAMETER
DESCRIPTOR.message_types_by_name['SolverParameter'] = _SOLVERPARAMETER
DESCRIPTOR.message_types_by_name['SolverState'] = _SOLVERSTATE
DESCRIPTOR.message_types_by_name['NetState'] = _NETSTATE
DESCRIPTOR.message_types_by_name['NetStateRule'] = _NETSTATERULE
DESCRIPTOR.message_types_by_name['ParamSpec'] = _PARAMSPEC
DESCRIPTOR.message_types_by_name['LayerParameter'] = _LAYERPARAMETER
DESCRIPTOR.message_types_by_name['TransformationParameter'] = _TRANSFORMATIONPARAMETER
DESCRIPTOR.message_types_by_name['LossParameter'] = _LOSSPARAMETER
DESCRIPTOR.message_types_by_name['AccuracyParameter'] = _ACCURACYPARAMETER
DESCRIPTOR.message_types_by_name['ArgMaxParameter'] = _ARGMAXPARAMETER
DESCRIPTOR.message_types_by_name['ConcatParameter'] = _CONCATPARAMETER
DESCRIPTOR.message_types_by_name['BatchNormParameter'] = _BATCHNORMPARAMETER
DESCRIPTOR.message_types_by_name['BiasParameter'] = _BIASPARAMETER
DESCRIPTOR.message_types_by_name['ContrastiveLossParameter'] = _CONTRASTIVELOSSPARAMETER
DESCRIPTOR.message_types_by_name['ConvolutionParameter'] = _CONVOLUTIONPARAMETER
DESCRIPTOR.message_types_by_name['DataParameter'] = _DATAPARAMETER
DESCRIPTOR.message_types_by_name['DropoutParameter'] = _DROPOUTPARAMETER
DESCRIPTOR.message_types_by_name['DummyDataParameter'] = _DUMMYDATAPARAMETER
DESCRIPTOR.message_types_by_name['EltwiseParameter'] = _ELTWISEPARAMETER
DESCRIPTOR.message_types_by_name['ELUParameter'] = _ELUPARAMETER
DESCRIPTOR.message_types_by_name['EmbedParameter'] = _EMBEDPARAMETER
DESCRIPTOR.message_types_by_name['ExpParameter'] = _EXPPARAMETER
DESCRIPTOR.message_types_by_name['FlattenParameter'] = _FLATTENPARAMETER
DESCRIPTOR.message_types_by_name['HDF5DataParameter'] = _HDF5DATAPARAMETER
DESCRIPTOR.message_types_by_name['HDF5OutputParameter'] = _HDF5OUTPUTPARAMETER
DESCRIPTOR.message_types_by_name['HingeLossParameter'] = _HINGELOSSPARAMETER
DESCRIPTOR.message_types_by_name['ImageDataParameter'] = _IMAGEDATAPARAMETER
DESCRIPTOR.message_types_by_name['InfogainLossParameter'] = _INFOGAINLOSSPARAMETER
DESCRIPTOR.message_types_by_name['InnerProductParameter'] = _INNERPRODUCTPARAMETER
DESCRIPTOR.message_types_by_name['LogParameter'] = _LOGPARAMETER
DESCRIPTOR.message_types_by_name['LRNParameter'] = _LRNPARAMETER
DESCRIPTOR.message_types_by_name['MemoryDataParameter'] = _MEMORYDATAPARAMETER
DESCRIPTOR.message_types_by_name['MVNParameter'] = _MVNPARAMETER
DESCRIPTOR.message_types_by_name['PoolingParameter'] = _POOLINGPARAMETER
DESCRIPTOR.message_types_by_name['PowerParameter'] = _POWERPARAMETER
DESCRIPTOR.message_types_by_name['PythonParameter'] = _PYTHONPARAMETER
DESCRIPTOR.message_types_by_name['ReductionParameter'] = _REDUCTIONPARAMETER
DESCRIPTOR.message_types_by_name['ReLUParameter'] = _RELUPARAMETER
DESCRIPTOR.message_types_by_name['ReshapeParameter'] = _RESHAPEPARAMETER
DESCRIPTOR.message_types_by_name['ScaleParameter'] = _SCALEPARAMETER
DESCRIPTOR.message_types_by_name['SigmoidParameter'] = _SIGMOIDPARAMETER
DESCRIPTOR.message_types_by_name['SliceParameter'] = _SLICEPARAMETER
DESCRIPTOR.message_types_by_name['SoftmaxParameter'] = _SOFTMAXPARAMETER
DESCRIPTOR.message_types_by_name['TanHParameter'] = _TANHPARAMETER
DESCRIPTOR.message_types_by_name['TileParameter'] = _TILEPARAMETER
DESCRIPTOR.message_types_by_name['ThresholdParameter'] = _THRESHOLDPARAMETER
DESCRIPTOR.message_types_by_name['WindowDataParameter'] = _WINDOWDATAPARAMETER
DESCRIPTOR.message_types_by_name['SPPParameter'] = _SPPPARAMETER
DESCRIPTOR.message_types_by_name['V1LayerParameter'] = _V1LAYERPARAMETER
DESCRIPTOR.message_types_by_name['V0LayerParameter'] = _V0LAYERPARAMETER
DESCRIPTOR.message_types_by_name['PReLUParameter'] = _PRELUPARAMETER
DESCRIPTOR.enum_types_by_name['Phase'] = _PHASE
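# Synthesize a concrete Python class for each message from its descriptor
# via the GeneratedProtocolMessageType metaclass, and register it with the
# default symbol database so it can be looked up by full name
# ('caffe.BlobShape' and so on).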
BlobShape = _reflection.GeneratedProtocolMessageType('BlobShape', (_message.Message,), dict(
DESCRIPTOR = _BLOBSHAPE,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.BlobShape)
))
_sym_db.RegisterMessage(BlobShape)
BlobProto = _reflection.GeneratedProtocolMessageType('BlobProto', (_message.Message,), dict(
DESCRIPTOR = _BLOBPROTO,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.BlobProto)
))
_sym_db.RegisterMessage(BlobProto)
BlobProtoVector = _reflection.GeneratedProtocolMessageType('BlobProtoVector', (_message.Message,), dict(
DESCRIPTOR = _BLOBPROTOVECTOR,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.BlobProtoVector)
))
_sym_db.RegisterMessage(BlobProtoVector)
Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), dict(
DESCRIPTOR = _DATUM,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.Datum)
))
_sym_db.RegisterMessage(Datum)
FillerParameter = _reflection.GeneratedProtocolMessageType('FillerParameter', (_message.Message,), dict(
DESCRIPTOR = _FILLERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.FillerParameter)
))
_sym_db.RegisterMessage(FillerParameter)
NetParameter = _reflection.GeneratedProtocolMessageType('NetParameter', (_message.Message,), dict(
DESCRIPTOR = _NETPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.NetParameter)
))
_sym_db.RegisterMessage(NetParameter)
SolverParameter = _reflection.GeneratedProtocolMessageType('SolverParameter', (_message.Message,), dict(
DESCRIPTOR = _SOLVERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SolverParameter)
))
_sym_db.RegisterMessage(SolverParameter)
SolverState = _reflection.GeneratedProtocolMessageType('SolverState', (_message.Message,), dict(
DESCRIPTOR = _SOLVERSTATE,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SolverState)
))
_sym_db.RegisterMessage(SolverState)
NetState = _reflection.GeneratedProtocolMessageType('NetState', (_message.Message,), dict(
DESCRIPTOR = _NETSTATE,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.NetState)
))
_sym_db.RegisterMessage(NetState)
NetStateRule = _reflection.GeneratedProtocolMessageType('NetStateRule', (_message.Message,), dict(
DESCRIPTOR = _NETSTATERULE,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.NetStateRule)
))
_sym_db.RegisterMessage(NetStateRule)
ParamSpec = _reflection.GeneratedProtocolMessageType('ParamSpec', (_message.Message,), dict(
DESCRIPTOR = _PARAMSPEC,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ParamSpec)
))
_sym_db.RegisterMessage(ParamSpec)
LayerParameter = _reflection.GeneratedProtocolMessageType('LayerParameter', (_message.Message,), dict(
DESCRIPTOR = _LAYERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.LayerParameter)
))
_sym_db.RegisterMessage(LayerParameter)
TransformationParameter = _reflection.GeneratedProtocolMessageType('TransformationParameter', (_message.Message,), dict(
DESCRIPTOR = _TRANSFORMATIONPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.TransformationParameter)
))
_sym_db.RegisterMessage(TransformationParameter)
LossParameter = _reflection.GeneratedProtocolMessageType('LossParameter', (_message.Message,), dict(
DESCRIPTOR = _LOSSPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.LossParameter)
))
_sym_db.RegisterMessage(LossParameter)
AccuracyParameter = _reflection.GeneratedProtocolMessageType('AccuracyParameter', (_message.Message,), dict(
DESCRIPTOR = _ACCURACYPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.AccuracyParameter)
))
_sym_db.RegisterMessage(AccuracyParameter)
ArgMaxParameter = _reflection.GeneratedProtocolMessageType('ArgMaxParameter', (_message.Message,), dict(
DESCRIPTOR = _ARGMAXPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ArgMaxParameter)
))
_sym_db.RegisterMessage(ArgMaxParameter)
ConcatParameter = _reflection.GeneratedProtocolMessageType('ConcatParameter', (_message.Message,), dict(
DESCRIPTOR = _CONCATPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ConcatParameter)
))
_sym_db.RegisterMessage(ConcatParameter)
BatchNormParameter = _reflection.GeneratedProtocolMessageType('BatchNormParameter', (_message.Message,), dict(
DESCRIPTOR = _BATCHNORMPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.BatchNormParameter)
))
_sym_db.RegisterMessage(BatchNormParameter)
BiasParameter = _reflection.GeneratedProtocolMessageType('BiasParameter', (_message.Message,), dict(
DESCRIPTOR = _BIASPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.BiasParameter)
))
_sym_db.RegisterMessage(BiasParameter)
ContrastiveLossParameter = _reflection.GeneratedProtocolMessageType('ContrastiveLossParameter', (_message.Message,), dict(
DESCRIPTOR = _CONTRASTIVELOSSPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ContrastiveLossParameter)
))
_sym_db.RegisterMessage(ContrastiveLossParameter)
ConvolutionParameter = _reflection.GeneratedProtocolMessageType('ConvolutionParameter', (_message.Message,), dict(
DESCRIPTOR = _CONVOLUTIONPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ConvolutionParameter)
))
_sym_db.RegisterMessage(ConvolutionParameter)
DataParameter = _reflection.GeneratedProtocolMessageType('DataParameter', (_message.Message,), dict(
DESCRIPTOR = _DATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.DataParameter)
))
_sym_db.RegisterMessage(DataParameter)
DropoutParameter = _reflection.GeneratedProtocolMessageType('DropoutParameter', (_message.Message,), dict(
DESCRIPTOR = _DROPOUTPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.DropoutParameter)
))
_sym_db.RegisterMessage(DropoutParameter)
DummyDataParameter = _reflection.GeneratedProtocolMessageType('DummyDataParameter', (_message.Message,), dict(
DESCRIPTOR = _DUMMYDATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.DummyDataParameter)
))
_sym_db.RegisterMessage(DummyDataParameter)
EltwiseParameter = _reflection.GeneratedProtocolMessageType('EltwiseParameter', (_message.Message,), dict(
DESCRIPTOR = _ELTWISEPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.EltwiseParameter)
))
_sym_db.RegisterMessage(EltwiseParameter)
ELUParameter = _reflection.GeneratedProtocolMessageType('ELUParameter', (_message.Message,), dict(
DESCRIPTOR = _ELUPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ELUParameter)
))
_sym_db.RegisterMessage(ELUParameter)
EmbedParameter = _reflection.GeneratedProtocolMessageType('EmbedParameter', (_message.Message,), dict(
DESCRIPTOR = _EMBEDPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.EmbedParameter)
))
_sym_db.RegisterMessage(EmbedParameter)
ExpParameter = _reflection.GeneratedProtocolMessageType('ExpParameter', (_message.Message,), dict(
DESCRIPTOR = _EXPPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ExpParameter)
))
_sym_db.RegisterMessage(ExpParameter)
FlattenParameter = _reflection.GeneratedProtocolMessageType('FlattenParameter', (_message.Message,), dict(
DESCRIPTOR = _FLATTENPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.FlattenParameter)
))
_sym_db.RegisterMessage(FlattenParameter)
HDF5DataParameter = _reflection.GeneratedProtocolMessageType('HDF5DataParameter', (_message.Message,), dict(
DESCRIPTOR = _HDF5DATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.HDF5DataParameter)
))
_sym_db.RegisterMessage(HDF5DataParameter)
HDF5OutputParameter = _reflection.GeneratedProtocolMessageType('HDF5OutputParameter', (_message.Message,), dict(
DESCRIPTOR = _HDF5OUTPUTPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.HDF5OutputParameter)
))
_sym_db.RegisterMessage(HDF5OutputParameter)
HingeLossParameter = _reflection.GeneratedProtocolMessageType('HingeLossParameter', (_message.Message,), dict(
DESCRIPTOR = _HINGELOSSPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.HingeLossParameter)
))
_sym_db.RegisterMessage(HingeLossParameter)
ImageDataParameter = _reflection.GeneratedProtocolMessageType('ImageDataParameter', (_message.Message,), dict(
DESCRIPTOR = _IMAGEDATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ImageDataParameter)
))
_sym_db.RegisterMessage(ImageDataParameter)
InfogainLossParameter = _reflection.GeneratedProtocolMessageType('InfogainLossParameter', (_message.Message,), dict(
DESCRIPTOR = _INFOGAINLOSSPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.InfogainLossParameter)
))
_sym_db.RegisterMessage(InfogainLossParameter)
InnerProductParameter = _reflection.GeneratedProtocolMessageType('InnerProductParameter', (_message.Message,), dict(
DESCRIPTOR = _INNERPRODUCTPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.InnerProductParameter)
))
_sym_db.RegisterMessage(InnerProductParameter)
LogParameter = _reflection.GeneratedProtocolMessageType('LogParameter', (_message.Message,), dict(
DESCRIPTOR = _LOGPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.LogParameter)
))
_sym_db.RegisterMessage(LogParameter)
LRNParameter = _reflection.GeneratedProtocolMessageType('LRNParameter', (_message.Message,), dict(
DESCRIPTOR = _LRNPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.LRNParameter)
))
_sym_db.RegisterMessage(LRNParameter)
MemoryDataParameter = _reflection.GeneratedProtocolMessageType('MemoryDataParameter', (_message.Message,), dict(
DESCRIPTOR = _MEMORYDATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.MemoryDataParameter)
))
_sym_db.RegisterMessage(MemoryDataParameter)
MVNParameter = _reflection.GeneratedProtocolMessageType('MVNParameter', (_message.Message,), dict(
DESCRIPTOR = _MVNPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.MVNParameter)
))
_sym_db.RegisterMessage(MVNParameter)
PoolingParameter = _reflection.GeneratedProtocolMessageType('PoolingParameter', (_message.Message,), dict(
DESCRIPTOR = _POOLINGPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.PoolingParameter)
))
_sym_db.RegisterMessage(PoolingParameter)
PowerParameter = _reflection.GeneratedProtocolMessageType('PowerParameter', (_message.Message,), dict(
DESCRIPTOR = _POWERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.PowerParameter)
))
_sym_db.RegisterMessage(PowerParameter)
PythonParameter = _reflection.GeneratedProtocolMessageType('PythonParameter', (_message.Message,), dict(
DESCRIPTOR = _PYTHONPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.PythonParameter)
))
_sym_db.RegisterMessage(PythonParameter)
ReductionParameter = _reflection.GeneratedProtocolMessageType('ReductionParameter', (_message.Message,), dict(
DESCRIPTOR = _REDUCTIONPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ReductionParameter)
))
_sym_db.RegisterMessage(ReductionParameter)
ReLUParameter = _reflection.GeneratedProtocolMessageType('ReLUParameter', (_message.Message,), dict(
DESCRIPTOR = _RELUPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ReLUParameter)
))
_sym_db.RegisterMessage(ReLUParameter)
ReshapeParameter = _reflection.GeneratedProtocolMessageType('ReshapeParameter', (_message.Message,), dict(
DESCRIPTOR = _RESHAPEPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ReshapeParameter)
))
_sym_db.RegisterMessage(ReshapeParameter)
ScaleParameter = _reflection.GeneratedProtocolMessageType('ScaleParameter', (_message.Message,), dict(
DESCRIPTOR = _SCALEPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ScaleParameter)
))
_sym_db.RegisterMessage(ScaleParameter)
SigmoidParameter = _reflection.GeneratedProtocolMessageType('SigmoidParameter', (_message.Message,), dict(
DESCRIPTOR = _SIGMOIDPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SigmoidParameter)
))
_sym_db.RegisterMessage(SigmoidParameter)
SliceParameter = _reflection.GeneratedProtocolMessageType('SliceParameter', (_message.Message,), dict(
DESCRIPTOR = _SLICEPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SliceParameter)
))
_sym_db.RegisterMessage(SliceParameter)
SoftmaxParameter = _reflection.GeneratedProtocolMessageType('SoftmaxParameter', (_message.Message,), dict(
DESCRIPTOR = _SOFTMAXPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SoftmaxParameter)
))
_sym_db.RegisterMessage(SoftmaxParameter)
TanHParameter = _reflection.GeneratedProtocolMessageType('TanHParameter', (_message.Message,), dict(
DESCRIPTOR = _TANHPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.TanHParameter)
))
_sym_db.RegisterMessage(TanHParameter)
TileParameter = _reflection.GeneratedProtocolMessageType('TileParameter', (_message.Message,), dict(
DESCRIPTOR = _TILEPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.TileParameter)
))
_sym_db.RegisterMessage(TileParameter)
ThresholdParameter = _reflection.GeneratedProtocolMessageType('ThresholdParameter', (_message.Message,), dict(
DESCRIPTOR = _THRESHOLDPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.ThresholdParameter)
))
_sym_db.RegisterMessage(ThresholdParameter)
WindowDataParameter = _reflection.GeneratedProtocolMessageType('WindowDataParameter', (_message.Message,), dict(
DESCRIPTOR = _WINDOWDATAPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.WindowDataParameter)
))
_sym_db.RegisterMessage(WindowDataParameter)
SPPParameter = _reflection.GeneratedProtocolMessageType('SPPParameter', (_message.Message,), dict(
DESCRIPTOR = _SPPPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.SPPParameter)
))
_sym_db.RegisterMessage(SPPParameter)
V1LayerParameter = _reflection.GeneratedProtocolMessageType('V1LayerParameter', (_message.Message,), dict(
DESCRIPTOR = _V1LAYERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.V1LayerParameter)
))
_sym_db.RegisterMessage(V1LayerParameter)
V0LayerParameter = _reflection.GeneratedProtocolMessageType('V0LayerParameter', (_message.Message,), dict(
DESCRIPTOR = _V0LAYERPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.V0LayerParameter)
))
_sym_db.RegisterMessage(V0LayerParameter)
PReLUParameter = _reflection.GeneratedProtocolMessageType('PReLUParameter', (_message.Message,), dict(
DESCRIPTOR = _PRELUPARAMETER,
__module__ = 'caffe_pb2'
# @@protoc_insertion_point(class_scope:caffe.PReLUParameter)
))
_sym_db.RegisterMessage(PReLUParameter)
_BLOBSHAPE.fields_by_name['dim'].has_options = True
_BLOBSHAPE.fields_by_name['dim']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_BLOBPROTO.fields_by_name['data'].has_options = True
_BLOBPROTO.fields_by_name['data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_BLOBPROTO.fields_by_name['diff'].has_options = True
_BLOBPROTO.fields_by_name['diff']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_BLOBPROTO.fields_by_name['double_data'].has_options = True
_BLOBPROTO.fields_by_name['double_data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_BLOBPROTO.fields_by_name['double_diff'].has_options = True
_BLOBPROTO.fields_by_name['double_diff']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
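# The registrations above turn each descriptor into a usable message class.
# A minimal usage sketch (not part of the generated file; it assumes the
# standard caffe.proto PowerParameter fields ``power``, ``scale``, ``shift``):
if __name__ == '__main__':
    p = PowerParameter()
    p.power = 2.0
    p.scale = 0.5
    raw = p.SerializeToString()          # protobuf wire format
    q = PowerParameter.FromString(raw)   # parses back to an equal message
    assert p == q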
| 242,354
| 41.963127
| 27,796
|
py
|
chainer
|
chainer-master/chainer/links/model/classifier.py
|
from chainer.functions.evaluation import accuracy
from chainer.functions.loss import softmax_cross_entropy
from chainer import link
from chainer import reporter
class Classifier(link.Chain):
"""A simple classifier model.
    This is an example of a chain that wraps another chain. It computes the
    loss and accuracy based on a given input/label pair.
Args:
predictor (~chainer.Link): Predictor network.
lossfun (callable):
Loss function.
            You can specify one of the loss functions from
:doc:`built-in loss functions </reference/functions>`, or
your own loss function (see the example below).
            It should not be one of the
            :doc:`loss functions with parameters </reference/links>`
            (i.e., a :class:`~chainer.Link` instance).
            The function must accept two arguments (an output from the
            predictor and its ground truth labels) and return a loss.
            The returned value must be a Variable derived from the input
            Variable so that backpropagation can be performed on it.
accfun (callable):
Function that computes accuracy.
            You can specify one of the evaluation functions from
:doc:`built-in evaluation functions </reference/functions>`, or
your own evaluation function.
The signature of the function is the same as ``lossfun``.
label_key (int or str): Key to specify label variable from arguments.
            When it is an ``int``, the variable at that position in the
            positional arguments is used; when it is a ``str``, the variable
            under that key in the keyword arguments is used.
Attributes:
predictor (~chainer.Link): Predictor network.
lossfun (callable):
Loss function.
See the description in the arguments for details.
accfun (callable):
Function that computes accuracy.
See the description in the arguments for details.
y (~chainer.Variable): Prediction for the last minibatch.
loss (~chainer.Variable): Loss value for the last minibatch.
accuracy (~chainer.Variable): Accuracy for the last minibatch.
compute_accuracy (bool): If ``True``, compute accuracy on the forward
computation. The default value is ``True``.
.. note::
        This link uses :func:`chainer.functions.softmax_cross_entropy` with
default arguments as a loss function (specified by ``lossfun``),
if users do not explicitly change it. In particular, the loss function
does not support double backpropagation.
If you need second or higher order differentiation, you need to turn
it on with ``enable_double_backprop=True``:
>>> import chainer.functions as F
>>> import chainer.links as L
>>>
>>> def lossfun(x, t):
... return F.softmax_cross_entropy(
... x, t, enable_double_backprop=True)
>>>
>>> predictor = L.Linear(10)
>>> model = L.Classifier(predictor, lossfun=lossfun)
"""
compute_accuracy = True
def __init__(self, predictor,
lossfun=softmax_cross_entropy.softmax_cross_entropy,
accfun=accuracy.accuracy,
label_key=-1):
        if not isinstance(label_key, (int, str)):
raise TypeError('label_key must be int or str, but is %s' %
type(label_key))
super(Classifier, self).__init__()
self.lossfun = lossfun
self.accfun = accfun
self.y = None
self.loss = None
self.accuracy = None
self.label_key = label_key
with self.init_scope():
self.predictor = predictor
def forward(self, *args, **kwargs):
"""Computes the loss value for an input and label pair.
It also computes accuracy and stores it to the attribute.
Args:
args (list of ~chainer.Variable): Input minibatch.
kwargs (dict of ~chainer.Variable): Input minibatch.
        When ``label_key`` is an ``int``, the corresponding element in
        ``args`` is treated as the ground truth labels; when it is a ``str``,
        the element in ``kwargs`` is used.
        All elements of ``args`` and ``kwargs`` except the ground truth
        labels are features.
        The method feeds the features to the predictor and compares the
        result with the ground truth labels.
.. note::
            We set the attributes ``y``, ``loss`` and ``accuracy`` to ``None``
            each time before running the predictor, to avoid unnecessary
            memory consumption. Note that the variables set on those
            attributes hold the whole computation graph when they are
            computed. The graph keeps intermediate values in memory for
            back-propagation, so the attributes must be cleared to free them.
Returns:
~chainer.Variable: Loss value.
"""
if isinstance(self.label_key, int):
if not (-len(args) <= self.label_key < len(args)):
msg = 'Label key %d is out of bounds' % self.label_key
raise ValueError(msg)
t = args[self.label_key]
if self.label_key == -1:
args = args[:-1]
else:
args = args[:self.label_key] + args[self.label_key + 1:]
elif isinstance(self.label_key, str):
if self.label_key not in kwargs:
msg = 'Label key "%s" is not found' % self.label_key
raise ValueError(msg)
t = kwargs[self.label_key]
del kwargs[self.label_key]
self.y = None
self.loss = None
self.accuracy = None
self.y = self.predictor(*args, **kwargs)
self.loss = self.lossfun(self.y, t)
reporter.report({'loss': self.loss}, self)
if self.compute_accuracy:
self.accuracy = self.accfun(self.y, t)
reporter.report({'accuracy': self.accuracy}, self)
return self.loss
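# A hedged usage sketch (not part of the original module): wrap a small
# predictor and pass the labels through a keyword argument, as described for
# ``label_key`` above. The predictor and data below are arbitrary examples.
if __name__ == '__main__':
    import numpy as np
    import chainer.links as L
    # label_key='t' means the ground truth is taken from kwargs['t'].
    model = Classifier(L.Linear(None, 10), label_key='t')
    x = np.random.rand(4, 5).astype(np.float32)
    t = np.random.randint(0, 10, size=4).astype(np.int32)
    loss = model(x, t=t)  # features in args, labels in kwargs
    print(loss.array, model.accuracy.array)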
| 6,083
| 39.56
| 79
|
py
|
chainer
|
chainer-master/chainer/links/model/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/links/model/vision/resnet.py
|
import collections
import os
import sys
import warnings
import numpy
try:
from PIL import Image
available = True
except ImportError as e:
available = False
_import_error = e
import chainer
from chainer.dataset.convert import concat_examples
from chainer.dataset import download
from chainer import function
from chainer.functions.activation.relu import relu
from chainer.functions.activation.softmax import softmax
from chainer.functions.array.reshape import reshape
from chainer.functions.math.sum import sum
from chainer.functions.pooling.average_pooling_2d import average_pooling_2d
from chainer.functions.pooling.max_pooling_nd import max_pooling_2d
from chainer.initializers import constant
from chainer.initializers import normal
from chainer import link
from chainer.links.connection.convolution_2d import Convolution2D
from chainer.links.connection.linear import Linear
from chainer.links.normalization.batch_normalization import BatchNormalization
from chainer.serializers import npz
from chainer.utils import argument
from chainer.utils import imgproc
from chainer.variable import Variable
class ResNetLayers(link.Chain):
"""A pre-trained CNN model provided by MSRA.
When you specify the path of the pre-trained chainer model serialized as
a ``.npz`` file in the constructor, this chain model automatically
initializes all the parameters with it.
This model would be useful when you want to extract a semantic feature
vector per image, or fine-tune the model on a different dataset.
Note that unlike ``VGG16Layers``, it does not automatically download a
pre-trained caffemodel. This caffemodel can be downloaded at
`GitHub <https://github.com/KaimingHe/deep-residual-networks>`_.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
    See: K. He et al., `Deep Residual Learning for Image Recognition
<https://arxiv.org/abs/1512.03385>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically loads and converts the caffemodel from
            ``$CHAINER_DATASET_ROOT/pfnet/chainer/models/ResNet-{n_layers}-model.caffemodel``,
            where ``$CHAINER_DATASET_ROOT`` is set as
            ``$HOME/.chainer/dataset`` unless you specify another value
            by modifying the environment variable, and {n_layers} is replaced
with the specified number of layers given as the first argument to
this constructor. Note that in this case the converted chainer
model is stored on the same directory and automatically used from
the next time.
If this argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in the original paper, i.e.,
``chainer.initializers.HeNormal(scale=1.0)``.
n_layers (int): The number of layers of this model. It should be either
50, 101, or 152.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model, n_layers, downsample_fb=False):
super(ResNetLayers, self).__init__()
if pretrained_model:
# As a sampling process is time-consuming,
# we employ a zero initializer for faster computation.
conv_kwargs = {'initialW': constant.Zero()}
else:
# employ default initializers used in the original paper
conv_kwargs = {'initialW': normal.HeNormal(scale=1.0)}
kwargs = conv_kwargs.copy()
kwargs['downsample_fb'] = downsample_fb
if n_layers == 50:
block = [3, 4, 6, 3]
elif n_layers == 101:
block = [3, 4, 23, 3]
elif n_layers == 152:
block = [3, 8, 36, 3]
else:
raise ValueError('The n_layers argument should be either 50, 101,'
' or 152, but {} was given.'.format(n_layers))
with self.init_scope():
self.conv1 = Convolution2D(3, 64, 7, 2, 3, **conv_kwargs)
self.bn1 = BatchNormalization(64)
self.res2 = BuildingBlock(block[0], 64, 64, 256, 1, **kwargs)
self.res3 = BuildingBlock(block[1], 256, 128, 512, 2, **kwargs)
self.res4 = BuildingBlock(block[2], 512, 256, 1024, 2, **kwargs)
self.res5 = BuildingBlock(block[3], 1024, 512, 2048, 2, **kwargs)
self.fc6 = Linear(2048, 1000)
if pretrained_model and pretrained_model.endswith('.caffemodel'):
_retrieve(n_layers, 'ResNet-{}-model.npz'.format(n_layers),
pretrained_model, self)
elif pretrained_model:
npz.load_npz(pretrained_model, self)
@property
def functions(self):
return collections.OrderedDict([
('conv1', [self.conv1, self.bn1, relu]),
('pool1', [lambda x: max_pooling_2d(x, ksize=3, stride=2)]),
('res2', [self.res2]),
('res3', [self.res3]),
('res4', [self.res4]),
('res5', [self.res5]),
('pool5', [_global_average_pooling_2d]),
('fc6', [self.fc6]),
('prob', [softmax]),
])
@property
def available_layers(self):
return list(self.functions.keys())
@classmethod
def convert_caffemodel_to_npz(cls, path_caffemodel, path_npz, n_layers=50):
"""Converts a pre-trained caffemodel to a chainer model.
Args:
path_caffemodel (str): Path of the pre-trained caffemodel.
path_npz (str): Path of the converted chainer model.
"""
# As CaffeFunction uses shortcut symbols,
# we import CaffeFunction here.
from chainer.links.caffe.caffe_function import CaffeFunction
caffemodel = CaffeFunction(path_caffemodel)
chainermodel = cls(pretrained_model=None, n_layers=n_layers)
if n_layers == 50:
_transfer_resnet50(caffemodel, chainermodel)
elif n_layers == 101:
_transfer_resnet101(caffemodel, chainermodel)
elif n_layers == 152:
_transfer_resnet152(caffemodel, chainermodel)
else:
raise ValueError('The n_layers argument should be either 50, 101,'
' or 152, but {} was given.'.format(n_layers))
npz.save_npz(path_npz, chainermodel, compression=False)
def forward(self, x, layers=None, **kwargs):
"""forward(self, x, layers=['prob'])
Computes all the feature maps specified by ``layers``.
Args:
x (~chainer.Variable): Input variable. It should be prepared by
``prepare`` function.
layers (list of str): The list of layer names you want to extract.
Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
the key contains the layer name and the value contains
the corresponding feature map variable.
"""
if layers is None:
layers = ['prob']
if kwargs:
argument.check_unexpected_kwargs(
kwargs, test='test argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
h = x
activations = {}
target_layers = set(layers)
for key, funcs in self.functions.items():
if not target_layers:
break
for func in funcs:
h = func(h)
if key in target_layers:
activations[key] = h
target_layers.remove(key)
return activations
def extract(self, images, layers=None, size=(224, 224), **kwargs):
"""extract(self, images, layers=['pool5'], size=(224, 224))
Extracts all the feature maps of given images.
        The difference from directly executing ``forward`` is that
        this method accepts images as an input and automatically
        transforms them to a proper variable. That is,
        it can be seen as a shortcut method that implicitly calls
        ``prepare`` and ``forward``.
Unlike ``predict`` method, this method does not override
``chainer.config.train`` and ``chainer.config.enable_backprop``
configuration. If you want to extract features without updating
model parameters, you need to manually set configuration when
calling this method as follows:
.. code-block:: python
# model is an instance of ResNetLayers (50 or 101 or 152 layers)
with chainer.using_config('train', False):
with chainer.using_config('enable_backprop', False):
feature = model.extract([image])
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
layers (list of str): The list of layer names you want to extract.
size (pair of ints): The resolution of resized images used as
                an input of the CNN. The given images are not resized
                if this argument is ``None``, but in that case the
                resolutions of all the images must be the same.
Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
the key contains the layer name and the value contains
the corresponding feature map variable.
"""
if layers is None:
layers = ['pool5']
if kwargs:
argument.check_unexpected_kwargs(
kwargs, test='test argument is not supported anymore. '
'Use chainer.using_config',
volatile='volatile argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
x = concat_examples([prepare(img, size=size) for img in images])
x = Variable(self.xp.asarray(x))
return self(x, layers=layers)
def predict(self, images, oversample=True):
"""Computes all the probabilities of given images.
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
When you specify a color image as a :class:`numpy.ndarray`,
make sure that color order is RGB.
oversample (bool): If ``True``, it averages results across
center, corners, and mirrors. Otherwise, it uses only the
center.
Returns:
~chainer.Variable: Output that contains the class probabilities
of given images.
"""
x = concat_examples([prepare(img, size=(256, 256)) for img in images])
if oversample:
x = imgproc.oversample(x, crop_dims=(224, 224))
else:
x = x[:, :, 16:240, 16:240]
# Use no_backprop_mode to reduce memory consumption
with function.no_backprop_mode(), chainer.using_config('train', False):
x = Variable(self.xp.asarray(x))
y = self(x, layers=['prob'])['prob']
if oversample:
n = len(y) // 10
y_shape = y.shape[1:]
y = reshape(y, (n, 10) + y_shape)
y = sum(y, axis=1) / 10
return y
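# A minimal numpy sketch (not part of the original module) of the ten-crop
# averaging performed in ``predict`` above: the (n * 10, ...) crop outputs
# are reshaped to (n, 10, ...) and averaged over the crop axis.
if __name__ == '__main__':
    probs = numpy.random.rand(2 * 10, 1000)  # 2 images, 10 crops each
    probs = probs.reshape(2, 10, 1000).mean(axis=1)
    assert probs.shape == (2, 1000)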
class ResNet50Layers(ResNetLayers):
"""A pre-trained CNN model with 50 layers provided by MSRA.
When you specify the path of the pre-trained chainer model serialized as
a ``.npz`` file in the constructor, this chain model automatically
initializes all the parameters with it.
This model would be useful when you want to extract a semantic feature
vector per image, or fine-tune the model on a different dataset.
Note that unlike ``VGG16Layers``, it does not automatically download a
pre-trained caffemodel. This caffemodel can be downloaded at
`GitHub <https://github.com/KaimingHe/deep-residual-networks>`_.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
    ResNet50 has 25,557,096 trainable parameters, which is 58% and 43% fewer
    than ResNet101 and ResNet152, respectively. On the other hand, its top-5
    classification accuracy on the ImageNet dataset drops only 0.7% and 1.1%
    from ResNet101 and ResNet152, respectively. Therefore, ResNet50 may offer
    the best balance between accuracy and model size. It is usually sufficient
    for many cases, but some advanced models for object detection or semantic
    segmentation use deeper ResNets as their building blocks, so the deeper
    variants are provided here to make reproduction work easier.
    See: K. He et al., `Deep Residual Learning for Image Recognition
<https://arxiv.org/abs/1512.03385>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically loads and converts the caffemodel from
``$CHAINER_DATASET_ROOT/pfnet/chainer/models/ResNet-50-model.caffemodel``,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
by modifying the environment variable. Note that in this case the
converted chainer model is stored on the same directory and
automatically used from the next time.
If this argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in the original paper, i.e.,
``chainer.initializers.HeNormal(scale=1.0)``.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto', downsample_fb=False):
if pretrained_model == 'auto':
pretrained_model = 'ResNet-50-model.caffemodel'
super(ResNet50Layers, self).__init__(
pretrained_model, 50, downsample_fb)
class ResNet101Layers(ResNetLayers):
"""A pre-trained CNN model with 101 layers provided by MSRA.
When you specify the path of the pre-trained chainer model serialized as
a ``.npz`` file in the constructor, this chain model automatically
initializes all the parameters with it.
This model would be useful when you want to extract a semantic feature
vector per image, or fine-tune the model on a different dataset.
Note that unlike ``VGG16Layers``, it does not automatically download a
pre-trained caffemodel. This caffemodel can be downloaded at
`GitHub <https://github.com/KaimingHe/deep-residual-networks>`_.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
    ResNet101 has 44,549,224 trainable parameters, which is 43% fewer than
    the ResNet152 model, while its top-5 classification accuracy on the
    ImageNet dataset drops only 0.4% from ResNet152. For many cases, ResNet50
    may offer the best balance between accuracy and model size.
    See: K. He et al., `Deep Residual Learning for Image Recognition
<https://arxiv.org/abs/1512.03385>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically loads and converts the caffemodel from
``$CHAINER_DATASET_ROOT/pfnet/chainer/models/ResNet-101-model.caffemodel``,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
by modifying the environment variable. Note that in this case the
converted chainer model is stored on the same directory and
automatically used from the next time.
If this argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in the original paper, i.e.,
``chainer.initializers.HeNormal(scale=1.0)``.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto', downsample_fb=False):
if pretrained_model == 'auto':
pretrained_model = 'ResNet-101-model.caffemodel'
super(ResNet101Layers, self).__init__(
pretrained_model, 101, downsample_fb)
class ResNet152Layers(ResNetLayers):
"""A pre-trained CNN model with 152 layers provided by MSRA.
When you specify the path of the pre-trained chainer model serialized as
a ``.npz`` file in the constructor, this chain model automatically
initializes all the parameters with it.
This model would be useful when you want to extract a semantic feature
vector per image, or fine-tune the model on a different dataset.
Note that unlike ``VGG16Layers``, it does not automatically download a
pre-trained caffemodel. This caffemodel can be downloaded at
`GitHub <https://github.com/KaimingHe/deep-residual-networks>`_.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
    ResNet152 has 60,192,872 trainable parameters. It is the deepest of the
    three ResNet models and achieved the best result on the ImageNet
    classification task in
    `ILSVRC 2015 <http://image-net.org/challenges/LSVRC/2015/results#loc>`_.
    See: K. He et al., `Deep Residual Learning for Image Recognition
<https://arxiv.org/abs/1512.03385>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically loads and converts the caffemodel from
``$CHAINER_DATASET_ROOT/pfnet/chainer/models/ResNet-152-model.caffemodel``,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
by modifying the environment variable. Note that in this case the
converted chainer model is stored on the same directory and
automatically used from the next time.
If this argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in the original paper, i.e.,
``chainer.initializers.HeNormal(scale=1.0)``.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto', downsample_fb=False):
if pretrained_model == 'auto':
pretrained_model = 'ResNet-152-model.caffemodel'
super(ResNet152Layers, self).__init__(
pretrained_model, 152, downsample_fb)
def prepare(image, size=(224, 224)):
"""Converts the given image to a numpy array for ResNet.
    Note that this method must be called before calling ``forward``,
    because the pre-trained ResNet model expects the given
    image to be resized, converted from RGB to BGR, mean-subtracted,
    and dimension-permuted in advance.
Args:
image (PIL.Image or numpy.ndarray): Input image.
If an input is ``numpy.ndarray``, its shape must be
``(height, width)``, ``(height, width, channels)``,
or ``(channels, height, width)``, and
the order of the channels must be RGB.
size (pair of ints): Size of converted images.
If ``None``, the given image is not resized.
Returns:
numpy.ndarray: The converted output array.
"""
if not available:
raise ImportError('PIL cannot be loaded. Install Pillow!\n'
'The actual import error is as follows:\n' +
str(_import_error))
dtype = chainer.get_dtype()
if isinstance(image, numpy.ndarray):
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0, :, :]
elif image.shape[0] == 3:
image = image.transpose((1, 2, 0))
image = Image.fromarray(image.astype(numpy.uint8))
image = image.convert('RGB')
if size:
image = image.resize(size)
image = numpy.asarray(image, dtype=dtype)
image = image[:, :, ::-1]
# NOTE: in the original paper they subtract a fixed mean image,
# however, in order to support arbitrary size we instead use the
# mean pixel (rather than mean image) as with VGG team. The mean
# value used in ResNet is slightly different from that of VGG16.
image -= numpy.array(
[103.063, 115.903, 123.152], dtype=dtype)
image = image.transpose((2, 0, 1))
return image
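# A small self-check sketch (not part of the original module; requires Pillow
# and uses a dummy all-zero RGB array) of the transform documented in
# ``prepare``: resize, RGB->BGR, mean subtraction, and HWC->CHW permutation.
if __name__ == '__main__':
    dummy = numpy.zeros((8, 8, 3), dtype=numpy.uint8)
    out = prepare(dummy)
    assert out.shape == (3, 224, 224)        # CHW after permutation
    assert numpy.allclose(out[0], -103.063)  # blue channel minus its mean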
class BuildingBlock(link.Chain):
"""A building block that consists of several Bottleneck layers.
Args:
n_layer (int): *(deprecated since v7.0.0)*
            `n_layer` is now deprecated for naming consistency.
Please use `n_layers` instead.
n_layers (int): Number of layers used in the building block.
in_channels (int): Number of channels of input arrays.
mid_channels (int): Number of channels of intermediate arrays.
out_channels (int): Number of channels of output arrays.
stride (int or tuple of ints): Stride of filter application.
initialW (4-D array): Initial weight value used in
the convolutional layers.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
"""
def __init__(self, n_layers=None, in_channels=None, mid_channels=None,
out_channels=None, stride=None, initialW=None,
downsample_fb=None, **kwargs):
super(BuildingBlock, self).__init__()
if 'n_layer' in kwargs:
warnings.warn(
'Argument `n_layer` is deprecated. '
'Please use `n_layers` instead',
DeprecationWarning)
n_layers = kwargs['n_layer']
with self.init_scope():
self.a = BottleneckA(
in_channels, mid_channels, out_channels, stride,
initialW, downsample_fb)
self._forward = ['a']
for i in range(n_layers - 1):
name = 'b{}'.format(i + 1)
bottleneck = BottleneckB(out_channels, mid_channels, initialW)
setattr(self, name, bottleneck)
self._forward.append(name)
def forward(self, x):
for name in self._forward:
            layer = getattr(self, name)
            x = layer(x)
return x
class BottleneckA(link.Chain):
"""A bottleneck layer that reduces the resolution of the feature map.
Args:
in_channels (int): Number of channels of input arrays.
mid_channels (int): Number of channels of intermediate arrays.
out_channels (int): Number of channels of output arrays.
stride (int or tuple of ints): Stride of filter application.
initialW (4-D array): Initial weight value used in
the convolutional layers.
downsample_fb (bool): If this argument is specified as ``False``,
it performs downsampling by placing stride 2
on the 1x1 convolutional layers (the original MSRA ResNet).
If this argument is specified as ``True``, it performs downsampling
by placing stride 2 on the 3x3 convolutional layers
(Facebook ResNet).
"""
def __init__(self, in_channels, mid_channels, out_channels,
stride=2, initialW=None, downsample_fb=False):
super(BottleneckA, self).__init__()
# In the original MSRA ResNet, stride=2 is on 1x1 convolution.
# In Facebook ResNet, stride=2 is on 3x3 convolution.
stride_1x1, stride_3x3 = (1, stride) if downsample_fb else (stride, 1)
with self.init_scope():
self.conv1 = Convolution2D(
in_channels, mid_channels, 1, stride_1x1, 0, initialW=initialW,
nobias=True)
self.bn1 = BatchNormalization(mid_channels)
self.conv2 = Convolution2D(
mid_channels, mid_channels, 3, stride_3x3, 1,
initialW=initialW, nobias=True)
self.bn2 = BatchNormalization(mid_channels)
self.conv3 = Convolution2D(
mid_channels, out_channels, 1, 1, 0, initialW=initialW,
nobias=True)
self.bn3 = BatchNormalization(out_channels)
self.conv4 = Convolution2D(
in_channels, out_channels, 1, stride, 0, initialW=initialW,
nobias=True)
self.bn4 = BatchNormalization(out_channels)
def forward(self, x):
h1 = relu(self.bn1(self.conv1(x)))
h1 = relu(self.bn2(self.conv2(h1)))
h1 = self.bn3(self.conv3(h1))
h2 = self.bn4(self.conv4(x))
return relu(h1 + h2)
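# A tiny sketch (not part of the original module) of the stride placement
# rule in ``BottleneckA.__init__``: the original MSRA ResNet puts stride 2 on
# the 1x1 convolution, while Facebook ResNet puts it on the 3x3 convolution.
if __name__ == '__main__':
    for downsample_fb in (False, True):
        stride_1x1, stride_3x3 = (1, 2) if downsample_fb else (2, 1)
        print(downsample_fb, stride_1x1, stride_3x3)  # (2, 1) then (1, 2)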
class BottleneckB(link.Chain):
"""A bottleneck layer that maintains the resolution of the feature map.
Args:
in_channels (int): Number of channels of input and output arrays.
mid_channels (int): Number of channels of intermediate arrays.
initialW (4-D array): Initial weight value used in
the convolutional layers.
"""
def __init__(self, in_channels, mid_channels, initialW=None):
super(BottleneckB, self).__init__()
with self.init_scope():
self.conv1 = Convolution2D(
in_channels, mid_channels, 1, 1, 0, initialW=initialW,
nobias=True)
self.bn1 = BatchNormalization(mid_channels)
self.conv2 = Convolution2D(
mid_channels, mid_channels, 3, 1, 1, initialW=initialW,
nobias=True)
self.bn2 = BatchNormalization(mid_channels)
self.conv3 = Convolution2D(
mid_channels, in_channels, 1, 1, 0, initialW=initialW,
nobias=True)
self.bn3 = BatchNormalization(in_channels)
def forward(self, x):
h = relu(self.bn1(self.conv1(x)))
h = relu(self.bn2(self.conv2(h)))
h = self.bn3(self.conv3(h))
return relu(h + x)
def _global_average_pooling_2d(x):
n, channel, rows, cols = x.shape
h = average_pooling_2d(x, (rows, cols), stride=1)
h = reshape(h, (n, channel))
return h
def _transfer_components(src, dst_conv, dst_bn, bname, cname):
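    # Caffe represents batch normalization as two consecutive layers:
    # ``BatchNorm`` (running mean/variance) followed by ``Scale``
    # (gamma/beta). Chainer's BatchNormalization link holds all four
    # statistics, so both Caffe layers are merged into ``dst_bn`` below.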
src_conv = getattr(src, 'res{}_branch{}'.format(bname, cname))
src_bn = getattr(src, 'bn{}_branch{}'.format(bname, cname))
src_scale = getattr(src, 'scale{}_branch{}'.format(bname, cname))
dst_conv.W.array[:] = src_conv.W.array
dst_bn.avg_mean[:] = src_bn.avg_mean
dst_bn.avg_var[:] = src_bn.avg_var
dst_bn.gamma.array[:] = src_scale.W.array
dst_bn.beta.array[:] = src_scale.bias.b.array
def _transfer_bottleneckA(src, dst, name):
_transfer_components(src, dst.conv1, dst.bn1, name, '2a')
_transfer_components(src, dst.conv2, dst.bn2, name, '2b')
_transfer_components(src, dst.conv3, dst.bn3, name, '2c')
_transfer_components(src, dst.conv4, dst.bn4, name, '1')
def _transfer_bottleneckB(src, dst, name):
_transfer_components(src, dst.conv1, dst.bn1, name, '2a')
_transfer_components(src, dst.conv2, dst.bn2, name, '2b')
_transfer_components(src, dst.conv3, dst.bn3, name, '2c')
def _transfer_block(src, dst, names):
_transfer_bottleneckA(src, dst.a, names[0])
for i, name in enumerate(names[1:]):
dst_bottleneckB = getattr(dst, 'b{}'.format(i + 1))
_transfer_bottleneckB(src, dst_bottleneckB, name)
def _transfer_resnet50(src, dst):
dst.conv1.W.array[:] = src.conv1.W.array
dst.conv1.b.array[:] = src.conv1.b.array
dst.bn1.avg_mean[:] = src.bn_conv1.avg_mean
dst.bn1.avg_var[:] = src.bn_conv1.avg_var
dst.bn1.gamma.array[:] = src.scale_conv1.W.array
dst.bn1.beta.array[:] = src.scale_conv1.bias.b.array
_transfer_block(src, dst.res2, ['2a', '2b', '2c'])
_transfer_block(src, dst.res3, ['3a', '3b', '3c', '3d'])
_transfer_block(src, dst.res4, ['4a', '4b', '4c', '4d', '4e', '4f'])
_transfer_block(src, dst.res5, ['5a', '5b', '5c'])
dst.fc6.W.array[:] = src.fc1000.W.array
dst.fc6.b.array[:] = src.fc1000.b.array
def _transfer_resnet101(src, dst):
dst.conv1.W.array[:] = src.conv1.W.array
dst.bn1.avg_mean[:] = src.bn_conv1.avg_mean
dst.bn1.avg_var[:] = src.bn_conv1.avg_var
dst.bn1.gamma.array[:] = src.scale_conv1.W.array
dst.bn1.beta.array[:] = src.scale_conv1.bias.b.array
_transfer_block(src, dst.res2, ['2a', '2b', '2c'])
_transfer_block(src, dst.res3, ['3a', '3b1', '3b2', '3b3'])
_transfer_block(src, dst.res4,
['4a'] + ['4b{}'.format(i) for i in range(1, 23)])
_transfer_block(src, dst.res5, ['5a', '5b', '5c'])
dst.fc6.W.array[:] = src.fc1000.W.array
dst.fc6.b.array[:] = src.fc1000.b.array
def _transfer_resnet152(src, dst):
dst.conv1.W.array[:] = src.conv1.W.array
dst.bn1.avg_mean[:] = src.bn_conv1.avg_mean
dst.bn1.avg_var[:] = src.bn_conv1.avg_var
dst.bn1.gamma.array[:] = src.scale_conv1.W.array
dst.bn1.beta.array[:] = src.scale_conv1.bias.b.array
_transfer_block(src, dst.res2, ['2a', '2b', '2c'])
_transfer_block(src, dst.res3,
['3a'] + ['3b{}'.format(i) for i in range(1, 8)])
_transfer_block(src, dst.res4,
['4a'] + ['4b{}'.format(i) for i in range(1, 36)])
_transfer_block(src, dst.res5, ['5a', '5b', '5c'])
dst.fc6.W.array[:] = src.fc1000.W.array
dst.fc6.b.array[:] = src.fc1000.b.array
def _make_npz(path_npz, path_caffemodel, model, n_layers):
sys.stderr.write(
        'Now loading caffemodel (usually this takes a few minutes)\n')
sys.stderr.flush()
if not os.path.exists(path_caffemodel):
raise IOError(
'The pre-trained caffemodel does not exist. Please download it '
'from \'https://github.com/KaimingHe/deep-residual-networks\', '
'and place it on {}'.format(path_caffemodel))
ResNetLayers.convert_caffemodel_to_npz(path_caffemodel, path_npz, n_layers)
npz.load_npz(path_npz, model)
return model
def _retrieve(n_layers, name_npz, name_caffemodel, model):
root = download.get_dataset_directory('pfnet/chainer/models/')
path = os.path.join(root, name_npz)
path_caffemodel = os.path.join(root, name_caffemodel)
return download.cache_or_load_file(
path, lambda path: _make_npz(path, path_caffemodel, model, n_layers),
lambda path: npz.load_npz(path, model))
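# A hedged end-to-end sketch (not part of the original module): with
# ``pretrained_model=None`` the weights are randomly initialized, so the
# probabilities below are meaningless, but the call exercises ``prepare`` and
# ``predict`` as documented above (requires Pillow).
if __name__ == '__main__':
    model = ResNet50Layers(pretrained_model=None)
    img = numpy.random.randint(0, 256, (224, 224, 3)).astype(numpy.uint8)
    probs = model.predict([img], oversample=False)
    assert probs.shape == (1, 1000)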
| 33,339
| 41.74359
| 94
|
py
|
chainer
|
chainer-master/chainer/links/model/vision/vgg.py
|
import collections
import os
import sys
import numpy
try:
from PIL import Image
available = True
except ImportError as e:
available = False
_import_error = e
import chainer
from chainer.dataset.convert import concat_examples
from chainer.dataset import download
from chainer import function
from chainer.functions.activation.relu import relu
from chainer.functions.activation.softmax import softmax
from chainer.functions.array.reshape import reshape
from chainer.functions.math.sum import sum
from chainer.functions.noise.dropout import dropout
from chainer.functions.pooling.max_pooling_nd import max_pooling_2d
from chainer.initializers import constant
from chainer.initializers import normal
from chainer import link
from chainer.links.connection.convolution_2d import Convolution2D
from chainer.links.connection.linear import Linear
from chainer.serializers import npz
from chainer.utils import argument
from chainer.utils import imgproc
from chainer.variable import Variable
class VGGLayers(link.Chain):
"""A pre-trained CNN model provided by VGG team.
You can use ``VGG16Layers`` or ``VGG19Layers`` for concrete
implementations. During initialization, this chain model
    automatically downloads the pre-trained caffemodel, converts it to
    a chainer model, stores it in your local directory,
    and initializes all the parameters with it.
This model would be useful when you want to extract a semantic
feature vector from a given image, or fine-tune the model
on a different dataset.
Note that these pre-trained models are released under Creative Commons
Attribution License.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
See: K. Simonyan and A. Zisserman, `Very Deep Convolutional Networks
for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically downloads the caffemodel from the internet.
Note that in this case the converted chainer model is stored
on ``$CHAINER_DATASET_ROOT/pfnet/chainer/models`` directory,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
            as an environment variable. The converted chainer model is
automatically used from the second time.
If the argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in the original paper, i.e.,
``chainer.initializers.Normal(scale=0.01)``.
n_layers (int): The number of layers of this model. It should be
either 16 or 19.
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto', n_layers=16):
super(VGGLayers, self).__init__()
if pretrained_model:
# As a sampling process is time-consuming,
# we employ a zero initializer for faster computation.
init = constant.Zero()
kwargs = {'initialW': init, 'initial_bias': init}
else:
# employ default initializers used in the original paper
kwargs = {
'initialW': normal.Normal(0.01),
'initial_bias': constant.Zero(),
}
if n_layers not in [16, 19]:
raise ValueError(
'The n_layers argument should be either 16 or 19, '
'but {} was given.'.format(n_layers)
)
with self.init_scope():
self.conv1_1 = Convolution2D(3, 64, 3, 1, 1, **kwargs)
self.conv1_2 = Convolution2D(64, 64, 3, 1, 1, **kwargs)
self.conv2_1 = Convolution2D(64, 128, 3, 1, 1, **kwargs)
self.conv2_2 = Convolution2D(128, 128, 3, 1, 1, **kwargs)
self.conv3_1 = Convolution2D(128, 256, 3, 1, 1, **kwargs)
self.conv3_2 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
self.conv3_3 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
self.conv4_1 = Convolution2D(256, 512, 3, 1, 1, **kwargs)
self.conv4_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
self.conv4_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
self.conv5_1 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
self.conv5_2 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
self.conv5_3 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
self.fc6 = Linear(512 * 7 * 7, 4096, **kwargs)
self.fc7 = Linear(4096, 4096, **kwargs)
self.fc8 = Linear(4096, 1000, **kwargs)
if n_layers == 19:
self.conv3_4 = Convolution2D(256, 256, 3, 1, 1, **kwargs)
self.conv4_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
self.conv5_4 = Convolution2D(512, 512, 3, 1, 1, **kwargs)
if pretrained_model == 'auto':
if n_layers == 16:
_retrieve(
'VGG_ILSVRC_16_layers.npz',
'https://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
'caffe/VGG_ILSVRC_16_layers.caffemodel',
self)
else:
_retrieve(
'VGG_ILSVRC_19_layers.npz',
'http://www.robots.ox.ac.uk/%7Evgg/software/very_deep/'
'caffe/VGG_ILSVRC_19_layers.caffemodel',
self)
elif pretrained_model:
npz.load_npz(pretrained_model, self)
@property
def functions(self):
# This class will not be used directly.
raise NotImplementedError
@property
def available_layers(self):
return list(self.functions.keys())
@classmethod
def convert_caffemodel_to_npz(cls, path_caffemodel, path_npz):
"""Converts a pre-trained caffemodel to a chainer model.
Args:
path_caffemodel (str): Path of the pre-trained caffemodel.
path_npz (str): Path of the converted chainer model.
"""
# As CaffeFunction uses shortcut symbols,
# we import CaffeFunction here.
from chainer.links.caffe.caffe_function import CaffeFunction
caffemodel = CaffeFunction(path_caffemodel)
npz.save_npz(path_npz, caffemodel, compression=False)
def forward(self, x, layers=None, **kwargs):
"""forward(self, x, layers=['prob'])
Computes all the feature maps specified by ``layers``.
Args:
x (~chainer.Variable): Input variable. It should be prepared by
``prepare`` function.
layers (list of str): The list of layer names you want to extract.
                If ``None``, ``['prob']`` is used as ``layers``.
Returns:
Dictionary of ~chainer.Variable: A dictionary in which
            the key contains the layer name and the value contains the
corresponding feature map variable.
"""
if layers is None:
layers = ['prob']
if kwargs:
argument.check_unexpected_kwargs(
kwargs, test='test argument is not supported anymore. '
'Use chainer.using_config'
)
argument.assert_kwargs_empty(kwargs)
h = x
activations = {}
target_layers = set(layers)
for key, funcs in self.functions.items():
if not target_layers:
break
for func in funcs:
h = func(h)
if key in target_layers:
activations[key] = h
target_layers.remove(key)
return activations
def extract(self, images, layers=None, size=(224, 224), **kwargs):
"""extract(self, images, layers=['fc7'], size=(224, 224))
Extracts all the feature maps of given images.
        The difference from directly executing ``forward`` is that
        this method accepts images as an input and automatically
        transforms them to a proper variable. That is,
        it can be seen as a shortcut method that implicitly calls
        ``prepare`` and ``forward``.
Unlike ``predict`` method, this method does not override
``chainer.config.train`` and ``chainer.config.enable_backprop``
configuration. If you want to extract features without updating
model parameters, you need to manually set configuration when
calling this method as follows:
.. code-block:: python
# model is an instance of VGGLayers (16 or 19 layers)
with chainer.using_config('train', False):
with chainer.using_config('enable_backprop', False):
feature = model.extract([image])
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
layers (list of str): The list of layer names you want to extract.
size (pair of ints): The resolution of resized images used as
                an input of the CNN. The given images are not resized
                if this argument is ``None``, but in that case the
                resolutions of all the images must be the same.
Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
the key contains the layer name and the value contains
the corresponding feature map variable.
"""
if layers is None:
layers = ['fc7']
if kwargs:
argument.check_unexpected_kwargs(
kwargs, test='test argument is not supported anymore. '
'Use chainer.using_config',
volatile='volatile argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
x = concat_examples([prepare(img, size=size) for img in images])
x = Variable(self.xp.asarray(x))
return self(x, layers=layers)
def predict(self, images, oversample=True):
"""Computes all the probabilities of given images.
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
When you specify a color image as a :class:`numpy.ndarray`,
make sure that color order is RGB.
oversample (bool): If ``True``, it averages results across
center, corners, and mirrors. Otherwise, it uses only the
center.
Returns:
~chainer.Variable: Output that contains the class probabilities
of given images.
"""
x = concat_examples([prepare(img, size=(256, 256)) for img in images])
if oversample:
x = imgproc.oversample(x, crop_dims=(224, 224))
else:
x = x[:, :, 16:240, 16:240]
# Use no_backprop_mode to reduce memory consumption
with function.no_backprop_mode(), chainer.using_config('train', False):
x = Variable(self.xp.asarray(x))
y = self(x, layers=['prob'])['prob']
if oversample:
n = len(y) // 10
y_shape = y.shape[1:]
y = reshape(y, (n, 10) + y_shape)
y = sum(y, axis=1) / 10
return y
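# A hedged usage sketch (not part of the original module; both file paths are
# placeholders) of the manual conversion path mentioned in the class
# docstring: convert a downloaded caffemodel once, then load the ``.npz``.
if __name__ == '__main__':
    if os.path.exists('VGG_ILSVRC_16_layers.caffemodel'):
        VGGLayers.convert_caffemodel_to_npz(
            'VGG_ILSVRC_16_layers.caffemodel', 'VGG_ILSVRC_16_layers.npz')
        model = VGGLayers('VGG_ILSVRC_16_layers.npz', n_layers=16)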
class VGG16Layers(VGGLayers):
"""A pre-trained CNN model with 16 layers provided by VGG team.
During initialization, this chain model automatically downloads
    the pre-trained caffemodel, converts it to a chainer model,
    stores it in your local directory, and initializes all the parameters
with it. This model would be useful when you want to extract a semantic
feature vector from a given image, or fine-tune the model
on a different dataset.
Note that this pre-trained model is released under Creative Commons
Attribution License.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
See: K. Simonyan and A. Zisserman, `Very Deep Convolutional Networks
for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically downloads the caffemodel from the internet.
Note that in this case the converted chainer model is stored
on ``$CHAINER_DATASET_ROOT/pfnet/chainer/models`` directory,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
            as an environment variable. The converted chainer model is
automatically used from the second time.
If the argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in the original paper, i.e.,
``chainer.initializers.Normal(scale=0.01)``.
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto'):
super(VGG16Layers, self).__init__(pretrained_model, 16)
@property
def functions(self):
return collections.OrderedDict([
('conv1_1', [self.conv1_1, relu]),
('conv1_2', [self.conv1_2, relu]),
('pool1', [_max_pooling_2d]),
('conv2_1', [self.conv2_1, relu]),
('conv2_2', [self.conv2_2, relu]),
('pool2', [_max_pooling_2d]),
('conv3_1', [self.conv3_1, relu]),
('conv3_2', [self.conv3_2, relu]),
('conv3_3', [self.conv3_3, relu]),
('pool3', [_max_pooling_2d]),
('conv4_1', [self.conv4_1, relu]),
('conv4_2', [self.conv4_2, relu]),
('conv4_3', [self.conv4_3, relu]),
('pool4', [_max_pooling_2d]),
('conv5_1', [self.conv5_1, relu]),
('conv5_2', [self.conv5_2, relu]),
('conv5_3', [self.conv5_3, relu]),
('pool5', [_max_pooling_2d]),
('fc6', [self.fc6, relu, dropout]),
('fc7', [self.fc7, relu, dropout]),
('fc8', [self.fc8]),
('prob', [softmax]),
])
class VGG19Layers(VGGLayers):
"""A pre-trained CNN model with 19 layers provided by VGG team.
During initialization, this chain model automatically downloads
    the pre-trained caffemodel, converts it to a chainer model,
    stores it in your local directory, and initializes all the parameters
with it. This model would be useful when you want to extract a semantic
feature vector from a given image, or fine-tune the model
on a different dataset.
Note that this pre-trained model is released under Creative Commons
Attribution License.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
See: K. Simonyan and A. Zisserman, `Very Deep Convolutional Networks
for Large-Scale Image Recognition <https://arxiv.org/abs/1409.1556>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically downloads the caffemodel from the internet.
Note that in this case the converted chainer model is stored
on ``$CHAINER_DATASET_ROOT/pfnet/chainer/models`` directory,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
            as an environment variable. The converted chainer model is
automatically used from the second time.
If the argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in the original paper, i.e.,
``chainer.initializers.Normal(scale=0.01)``.
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto'):
super(VGG19Layers, self).__init__(pretrained_model, 19)
@property
def functions(self):
return collections.OrderedDict([
('conv1_1', [self.conv1_1, relu]),
('conv1_2', [self.conv1_2, relu]),
('pool1', [_max_pooling_2d]),
('conv2_1', [self.conv2_1, relu]),
('conv2_2', [self.conv2_2, relu]),
('pool2', [_max_pooling_2d]),
('conv3_1', [self.conv3_1, relu]),
('conv3_2', [self.conv3_2, relu]),
('conv3_3', [self.conv3_3, relu]),
('conv3_4', [self.conv3_4, relu]),
('pool3', [_max_pooling_2d]),
('conv4_1', [self.conv4_1, relu]),
('conv4_2', [self.conv4_2, relu]),
('conv4_3', [self.conv4_3, relu]),
('conv4_4', [self.conv4_4, relu]),
('pool4', [_max_pooling_2d]),
('conv5_1', [self.conv5_1, relu]),
('conv5_2', [self.conv5_2, relu]),
('conv5_3', [self.conv5_3, relu]),
('conv5_4', [self.conv5_4, relu]),
('pool5', [_max_pooling_2d]),
('fc6', [self.fc6, relu, dropout]),
('fc7', [self.fc7, relu, dropout]),
('fc8', [self.fc8]),
('prob', [softmax]),
])
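# A minimal sketch (not part of the original module): with
# ``pretrained_model=None`` no download happens, so the layer pipeline can be
# inspected cheaply through ``available_layers``.
if __name__ == '__main__':
    model = VGG16Layers(pretrained_model=None)
    print(model.available_layers)  # ['conv1_1', ..., 'fc8', 'prob']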
def prepare(image, size=(224, 224)):
"""Converts the given image to the numpy array for VGG models.
Note that you have to call this method before ``forward``
because the pre-trained vgg model requires to resize the given image,
covert the RGB to the BGR, subtract the mean,
and permute the dimensions before calling.
Args:
image (PIL.Image or numpy.ndarray): Input image.
If an input is ``numpy.ndarray``, its shape must be
``(height, width)``, ``(height, width, channels)``,
or ``(channels, height, width)``, and
the order of the channels must be RGB.
size (pair of ints): Size of converted images.
If ``None``, the given image is not resized.
Returns:
numpy.ndarray: The converted output array.
"""
if not available:
raise ImportError('PIL cannot be loaded. Install Pillow!\n'
'The actual import error is as follows:\n' +
str(_import_error))
dtype = chainer.get_dtype()
if isinstance(image, numpy.ndarray):
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0, :, :]
elif image.shape[0] == 3:
image = image.transpose((1, 2, 0))
image = Image.fromarray(image.astype(numpy.uint8))
image = image.convert('RGB')
if size:
image = image.resize(size)
image = numpy.asarray(image, dtype=dtype)
image = image[:, :, ::-1]
image -= numpy.array(
[103.939, 116.779, 123.68], dtype=dtype)
image = image.transpose((2, 0, 1))
return image
def _max_pooling_2d(x):
return max_pooling_2d(x, ksize=2)
def _make_npz(path_npz, url, model):
path_caffemodel = download.cached_download(url)
sys.stderr.write(
        'Now loading caffemodel (usually this takes a few minutes)\n')
sys.stderr.flush()
VGGLayers.convert_caffemodel_to_npz(path_caffemodel, path_npz)
npz.load_npz(path_npz, model)
return model
def _retrieve(name, url, model):
root = download.get_dataset_directory('pfnet/chainer/models/')
path = os.path.join(root, name)
return download.cache_or_load_file(
path, lambda path: _make_npz(path, url, model),
lambda path: npz.load_npz(path, model))
| 20,475
| 39.546535
| 79
|
py
|
chainer
|
chainer-master/chainer/links/model/vision/googlenet.py
|
import collections
import os
import sys
import numpy
try:
from PIL import Image
available = True
except ImportError as e:
available = False
_import_error = e
import chainer
from chainer.dataset.convert import concat_examples
from chainer.dataset import download
from chainer import function
from chainer.functions.activation.relu import relu
from chainer.functions.activation.softmax import softmax
from chainer.functions.array.reshape import reshape
from chainer.functions.math.average import average
from chainer.functions.noise.dropout import dropout
from chainer.functions.normalization.local_response_normalization import (
local_response_normalization)
from chainer.functions.pooling.average_pooling_2d import average_pooling_2d
from chainer.functions.pooling.max_pooling_nd import max_pooling_2d
from chainer.initializers import constant
from chainer.initializers import uniform
from chainer import link
from chainer.links.connection.convolution_2d import Convolution2D
from chainer.links.connection.inception import Inception
from chainer.links.connection.linear import Linear
from chainer.serializers import npz
from chainer.utils import argument
from chainer.utils import imgproc
from chainer.variable import Variable
class GoogLeNet(link.Chain):
"""A pre-trained GoogLeNet model provided by BVLC.
When you specify the path of the pre-trained chainer model serialized as
a ``.npz`` file in the constructor, this chain model automatically
initializes all the parameters with it.
This model would be useful when you want to extract a semantic feature
vector per image, or fine-tune the model on a different dataset.
If you want to manually convert the pre-trained caffemodel to a chainer
model that can be specified in the constructor,
please use ``convert_caffemodel_to_npz`` classmethod instead.
    GoogLeNet, which is also called Inception-v1, is a convolutional
    neural network architecture proposed in 2014. This model is relatively
    lightweight and requires a small memory footprint during training compared
    with modern architectures such as ResNet. Therefore, if you fine-tune your
    network based on a model pre-trained on ImageNet and need to train it with
    a large batch size, GoogLeNet may be useful. On the other hand, if you just
want an off-the-shelf classifier, we recommend that you use ResNet50 or
other models since they are more accurate than GoogLeNet.
The original model is provided here:
`<https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet>`_
Args:
pretrained_model (str): the destination of the pre-trained
chainer model serialized as a ``.npz`` file.
If this argument is specified as ``auto``,
it automatically downloads the caffemodel from the internet.
Note that in this case the converted chainer model is stored
on ``$CHAINER_DATASET_ROOT/pfnet/chainer/models`` directory,
where ``$CHAINER_DATASET_ROOT`` is set as
``$HOME/.chainer/dataset`` unless you specify another value
            as an environment variable. The converted chainer model is
automatically used from the second time.
If the argument is specified as ``None``, all the parameters
are not initialized by the pre-trained model, but the default
initializer used in BVLC, i.e.,
``chainer.initializers.LeCunUniform(scale=1.0)``.
Note that, in Caffe, when weight_filler is specified as
"xavier" type without variance_norm parameter, the weights are
initialized by Uniform(-s, s), where
:math:`s = \\sqrt{\\frac{3}{fan_{in}}}` and :math:`fan_{in}` is the
number of input units. This corresponds to LeCunUniform in Chainer
but not GlorotUniform.
Attributes:
available_layers (list of str): The list of available layer names
used by ``forward`` and ``extract`` methods.
"""
def __init__(self, pretrained_model='auto'):
super(GoogLeNet, self).__init__()
if pretrained_model:
# As a sampling process is time-consuming,
# we employ a zero initializer for faster computation.
kwargs = {'initialW': constant.Zero()}
else:
# employ default initializers used in BVLC. For more detail, see
# https://github.com/chainer/chainer/pull/2424#discussion_r109642209
kwargs = {'initialW': uniform.LeCunUniform(scale=1.0)}
with self.init_scope():
self.conv1 = Convolution2D(3, 64, 7, stride=2, pad=3, **kwargs)
self.conv2_reduce = Convolution2D(64, 64, 1, **kwargs)
self.conv2 = Convolution2D(64, 192, 3, stride=1, pad=1, **kwargs)
self.inc3a = Inception(192, 64, 96, 128, 16, 32, 32)
self.inc3b = Inception(256, 128, 128, 192, 32, 96, 64)
self.inc4a = Inception(480, 192, 96, 208, 16, 48, 64)
self.inc4b = Inception(512, 160, 112, 224, 24, 64, 64)
self.inc4c = Inception(512, 128, 128, 256, 24, 64, 64)
self.inc4d = Inception(512, 112, 144, 288, 32, 64, 64)
self.inc4e = Inception(528, 256, 160, 320, 32, 128, 128)
self.inc5a = Inception(832, 256, 160, 320, 32, 128, 128)
self.inc5b = Inception(832, 384, 192, 384, 48, 128, 128)
self.loss3_fc = Linear(1024, 1000, **kwargs)
self.loss1_conv = Convolution2D(512, 128, 1, **kwargs)
self.loss1_fc1 = Linear(2048, 1024, **kwargs)
self.loss1_fc2 = Linear(1024, 1000, **kwargs)
self.loss2_conv = Convolution2D(528, 128, 1, **kwargs)
self.loss2_fc1 = Linear(2048, 1024, **kwargs)
self.loss2_fc2 = Linear(1024, 1000, **kwargs)
if pretrained_model == 'auto':
_retrieve(
'bvlc_googlenet.npz',
'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel',
self)
elif pretrained_model:
npz.load_npz(pretrained_model, self)
@property
def functions(self):
return collections.OrderedDict([
('conv1', [self.conv1, relu]),
('pool1', [_max_pooling_2d, _local_response_normalization]),
('conv2_reduce', [self.conv2_reduce, relu]),
('conv2', [self.conv2, relu, _local_response_normalization]),
('pool2', [_max_pooling_2d]),
('inception_3a', [self.inc3a]),
('inception_3b', [self.inc3b]),
('pool3', [_max_pooling_2d]),
('inception_4a', [self.inc4a]),
('inception_4b', [self.inc4b]),
('inception_4c', [self.inc4c]),
('inception_4d', [self.inc4d]),
('inception_4e', [self.inc4e]),
('pool4', [_max_pooling_2d]),
('inception_5a', [self.inc5a]),
('inception_5b', [self.inc5b]),
('pool5', [_average_pooling_2d_k7]),
('loss3_fc', [_dropout, self.loss3_fc]),
('prob', [softmax]),
            # Since the following outputs are usually not used, they are put
            # after 'prob' so that they can be skipped for efficiency.
('loss1_fc2', [_average_pooling_2d_k5, self.loss1_conv, relu,
self.loss1_fc1, relu, self.loss1_fc2]),
('loss2_fc2', [_average_pooling_2d_k5, self.loss2_conv, relu,
self.loss2_fc1, relu, self.loss2_fc2])
])
@property
def available_layers(self):
return list(self.functions.keys())
@classmethod
def convert_caffemodel_to_npz(cls, path_caffemodel, path_npz):
"""Converts a pre-trained caffemodel to a chainer model.
Args:
path_caffemodel (str): Path of the pre-trained caffemodel.
path_npz (str): Path of the converted chainer model.
"""
# As CaffeFunction uses shortcut symbols,
# we import CaffeFunction here.
from chainer.links.caffe.caffe_function import CaffeFunction
caffemodel = CaffeFunction(path_caffemodel)
chainermodel = cls(pretrained_model=None)
_transfer_googlenet(caffemodel, chainermodel)
npz.save_npz(path_npz, chainermodel, compression=False)
def forward(self, x, layers=None, **kwargs):
"""forward(self, x, layers=['prob'])
Computes all the feature maps specified by ``layers``.
Args:
x (~chainer.Variable): Input variable. It should be prepared by
``prepare`` function.
layers (list of str): The list of layer names you want to extract.
Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
                the key is the layer name and the value is
                the corresponding feature map variable.
"""
if layers is None:
layers = ['prob']
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
h = x
activations = {}
inception_4a_cache = None
inception_4d_cache = None
target_layers = set(layers)
for key, funcs in self.functions.items():
if not target_layers:
break
if key == 'loss1_fc2':
h = inception_4a_cache
elif key == 'loss2_fc2':
h = inception_4d_cache
for func in funcs:
h = func(h)
if key in target_layers:
activations[key] = h
target_layers.remove(key)
if key == 'inception_4a':
inception_4a_cache = h
elif key == 'inception_4d':
inception_4d_cache = h
return activations
def extract(self, images, layers=None, size=(224, 224), **kwargs):
"""extract(self, images, layers=['pool5'], size=(224, 224))
Extracts all the feature maps of given images.
        The difference from directly executing ``forward`` is that
        this method directly accepts images as an input and automatically
        transforms them to a proper variable. That is,
        it can be interpreted as a shortcut method that implicitly calls
        the ``prepare`` and ``forward`` functions.
Unlike ``predict`` method, this method does not override
``chainer.config.train`` and ``chainer.config.enable_backprop``
configuration. If you want to extract features without updating
model parameters, you need to manually set configuration when
calling this method as follows:
.. code-block:: python
# model is an instance of `GoogLeNet`
with chainer.using_config('train', False):
with chainer.using_config('enable_backprop', False):
feature = model.extract([image])
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
layers (list of str): The list of layer names you want to extract.
            size (pair of ints): The resolution of resized images used as
                an input of the CNN. The given images are not resized
                if this argument is ``None``; in that case, the resolutions
                of all the images must be the same.
Returns:
            Dictionary of ~chainer.Variable: A dictionary in which
                the key is the layer name and the value is
                the corresponding feature map variable.
"""
if layers is None:
layers = ['pool5']
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config',
volatile='volatile argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
x = concat_examples([prepare(img, size=size) for img in images])
x = Variable(self.xp.asarray(x))
return self(x, layers=layers)
def predict(self, images, oversample=True):
"""Computes all the probabilities of given images.
Args:
images (iterable of PIL.Image or numpy.ndarray): Input images.
When you specify a color image as a :class:`numpy.ndarray`,
make sure that color order is RGB.
oversample (bool): If ``True``, it averages results across
center, corners, and mirrors. Otherwise, it uses only the
center.
Returns:
~chainer.Variable: Output that contains the class probabilities
of given images.
"""
x = concat_examples([prepare(img, size=(256, 256)) for img in images])
if oversample:
x = imgproc.oversample(x, crop_dims=(224, 224))
else:
x = x[:, :, 16:240, 16:240]
# Use no_backprop_mode to reduce memory consumption
with function.no_backprop_mode(), chainer.using_config('train', False):
x = Variable(self.xp.asarray(x))
y = self(x, layers=['prob'])['prob']
if oversample:
n = len(y) // 10
y_shape = y.shape[1:]
y = reshape(y, (n, 10) + y_shape)
y = average(y, axis=1)
return y
def prepare(image, size=(224, 224)):
"""Converts the given image to the numpy array for GoogLeNet.
    Note that you have to call this method before ``forward``
    because the pre-trained GoogLeNet model requires resizing the given
    image, converting it from RGB to BGR, subtracting the mean,
    and permuting the dimensions before calling.
Args:
image (PIL.Image or numpy.ndarray): Input image.
If an input is ``numpy.ndarray``, its shape must be
``(height, width)``, ``(height, width, channels)``,
or ``(channels, height, width)``, and
the order of the channels must be RGB.
size (pair of ints): Size of converted images.
If ``None``, the given image is not resized.
Returns:
numpy.ndarray: The converted output array.
"""
if not available:
raise ImportError('PIL cannot be loaded. Install Pillow!\n'
'The actual import error is as follows:\n' +
str(_import_error))
dtype = chainer.get_dtype()
if isinstance(image, numpy.ndarray):
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0, :, :]
elif image.shape[0] == 3:
image = image.transpose((1, 2, 0))
image = Image.fromarray(image.astype(numpy.uint8))
image = image.convert('RGB')
if size:
image = image.resize(size)
image = numpy.asarray(image, dtype=dtype)
image = image[:, :, ::-1]
image -= numpy.array([104.0, 117.0, 123.0], dtype=dtype) # BGR
image = image.transpose((2, 0, 1))
return image
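# A minimal usage sketch of ``prepare`` (illustrative, not part of the
# original source; it assumes Pillow is installed and uses a random array
# as a stand-in for a real photograph).
def _demo_prepare():
    import numpy
    img = numpy.random.randint(0, 256, (480, 640, 3)).astype(numpy.uint8)
    x = prepare(img)  # resize to 224x224, RGB->BGR, subtract mean, HWC->CHW
    assert x.shape == (3, 224, 224)
    return x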
def _transfer_inception(src, dst, names):
for name in names:
chain = getattr(dst, 'inc{}'.format(name))
src_prefix = 'inception_{}/'.format(name)
chain.conv1.W.array[:] = src[src_prefix + '1x1'].W.array
chain.conv1.b.array[:] = src[src_prefix + '1x1'].b.array
chain.proj3.W.array[:] = src[src_prefix + '3x3_reduce'].W.array
chain.proj3.b.array[:] = src[src_prefix + '3x3_reduce'].b.array
chain.conv3.W.array[:] = src[src_prefix + '3x3'].W.array
chain.conv3.b.array[:] = src[src_prefix + '3x3'].b.array
chain.proj5.W.array[:] = src[src_prefix + '5x5_reduce'].W.array
chain.proj5.b.array[:] = src[src_prefix + '5x5_reduce'].b.array
chain.conv5.W.array[:] = src[src_prefix + '5x5'].W.array
chain.conv5.b.array[:] = src[src_prefix + '5x5'].b.array
chain.projp.W.array[:] = src[src_prefix + 'pool_proj'].W.array
chain.projp.b.array[:] = src[src_prefix + 'pool_proj'].b.array
def _transfer_googlenet(src, dst):
# 1 #################################################################
dst.conv1.W.array[:] = src['conv1/7x7_s2'].W.array
dst.conv1.b.array[:] = src['conv1/7x7_s2'].b.array
# 2 #################################################################
dst.conv2_reduce.W.array[:] = src['conv2/3x3_reduce'].W.array
dst.conv2_reduce.b.array[:] = src['conv2/3x3_reduce'].b.array
dst.conv2.W.array[:] = src['conv2/3x3'].W.array
dst.conv2.b.array[:] = src['conv2/3x3'].b.array
# 3, 4, 5 ###########################################################
_transfer_inception(src, dst, ['3a', '3b',
'4a', '4b', '4c', '4d', '4e',
'5a', '5b'])
# outputs ############################################################
dst.loss1_conv.W.array[:] = src['loss1/conv'].W.array
dst.loss1_conv.b.array[:] = src['loss1/conv'].b.array
dst.loss1_fc1.W.array[:] = src['loss1/fc'].W.array
dst.loss1_fc1.b.array[:] = src['loss1/fc'].b.array
dst.loss1_fc2.W.array[:] = src['loss1/classifier'].W.array
dst.loss1_fc2.b.array[:] = src['loss1/classifier'].b.array
dst.loss2_conv.W.array[:] = src['loss2/conv'].W.array
dst.loss2_conv.b.array[:] = src['loss2/conv'].b.array
dst.loss2_fc1.W.array[:] = src['loss2/fc'].W.array
dst.loss2_fc1.b.array[:] = src['loss2/fc'].b.array
dst.loss2_fc2.W.array[:] = src['loss2/classifier'].W.array
dst.loss2_fc2.b.array[:] = src['loss2/classifier'].b.array
dst.loss3_fc.W.array[:] = src['loss3/classifier'].W.array
dst.loss3_fc.b.array[:] = src['loss3/classifier'].b.array
def _max_pooling_2d(x):
return max_pooling_2d(x, ksize=3, stride=2)
def _local_response_normalization(x):
return local_response_normalization(x, n=5, k=1, alpha=1e-4 / 5)
def _average_pooling_2d_k5(x):
return average_pooling_2d(x, ksize=5, stride=3)
def _average_pooling_2d_k7(x):
return average_pooling_2d(x, ksize=7, stride=1)
def _dropout(x):
return dropout(x, ratio=0.4)
def _make_npz(path_npz, url, model):
path_caffemodel = download.cached_download(url)
sys.stderr.write(
        'Now loading caffemodel (it usually takes a few minutes)\n')
sys.stderr.flush()
GoogLeNet.convert_caffemodel_to_npz(path_caffemodel, path_npz)
npz.load_npz(path_npz, model)
return model
def _retrieve(name_npz, url, model):
root = download.get_dataset_directory('pfnet/chainer/models/')
path = os.path.join(root, name_npz)
return download.cache_or_load_file(
path, lambda path: _make_npz(path, url, model),
lambda path: npz.load_npz(path, model))
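# An end-to-end usage sketch (illustrative, not part of the original source).
# ``images`` is a hypothetical iterable of PIL.Image objects; the first call
# with pretrained_model='auto' downloads and converts the caffemodel.
def _demo_classify(images):
    model = GoogLeNet(pretrained_model='auto')
    probs = model.predict(images)      # class probabilities, shape (N, 1000)
    return probs.array.argmax(axis=1)  # predicted ImageNet class indices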
| 18,874
| 40.032609
| 80
|
py
|
chainer
|
chainer-master/chainer/links/model/vision/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/chainer/training/extension.py
|
from chainer.utils import argument
PRIORITY_WRITER = 300
PRIORITY_EDITOR = 200
PRIORITY_READER = 100
class Extension(object):
"""Base class of trainer extensions.
Extension of :class:`Trainer` is a callable object that takes the trainer
object as the argument. It also provides some default configurations as its
attributes, e.g. the default trigger and the default priority. This class
provides a set of typical default values for these attributes.
There are three ways to define users' own extensions: inheriting this
class, decorating closures by :func:`make_extension`, or using any callable
    including lambda functions as extensions. The decorator can slightly
    reduce the overhead and is much easier to use, while inheriting this class
    provides more flexibility (for example, it can have methods to configure
    the behavior). Using a lambda function allows one-line coding for simple
    purposes, but users have to specify the configurations as arguments to
    :meth:`Trainer.extend`. For a callable not inheriting this class, the
    default configurations of this class are used unless the user explicitly
    specifies them in the :meth:`Trainer.extend` method.
Attributes:
trigger: Default value of trigger for this extension. It is set to
``(1, 'iteration')`` by default.
priority: Default priority of the extension. It is set to
``PRIORITY_READER`` by default.
~Extension.name: Name of the extension. It is set to
``None`` by default. This value will be overwritten when
registering an extension to a trainer. See
:meth:`chainer.training.Trainer.extend` for details.
"""
trigger = 1, 'iteration'
priority = PRIORITY_READER
name = None
@property
def default_name(self):
"""Default name of the extension.
It is the name of the class by default. Implementation can override
this property, or provide a class attribute to hide it.
"""
return type(self).__name__
def __call__(self, trainer):
"""Invokes the extension.
Implementations should override this operator. This method is called
at iterations which the corresponding trigger accepts.
Args:
trainer (Trainer): Trainer object that calls this operator.
"""
raise NotImplementedError(
'Extension implementation must override __call__.')
def __getattr__(self, name):
if name == 'invoke_before_training':
raise AttributeError(
'invoke_before_training has been removed since Chainer '
'v2.0.0. Use Extension.initialize instead.')
raise AttributeError('{} object has no attribute {}'.format(
type(self).__name__, name))
def finalize(self):
"""Finalizes the extension.
This method is called at the end of the training loop.
"""
pass
def initialize(self, trainer):
"""Initializes up the trainer state.
This method is called before entering the training loop. An extension
that modifies the state of :class:`~chainer.training.Trainer` can
override this method to initialize it.
When the trainer has been restored from a snapshot, this method has to
recover an appropriate part of the state of the trainer.
For example, :class:`~chainer.training.extensions.ExponentialShift`
extension changes the optimizer's hyperparameter at each invocation.
Note that the hyperparameter is not saved to the snapshot; it is the
responsibility of the extension to recover the hyperparameter.
The :class:`~chainer.training.extensions.ExponentialShift` extension
recovers it in its ``initialize`` method if it has been loaded from a
        snapshot, or just sets the initial value otherwise.
Args:
trainer (Trainer): Trainer object that runs the training loop.
"""
pass
def on_error(self, trainer, exc, tb):
"""Handles the error raised during training before finalization.
This method is called when an exception is thrown during the
training loop, before finalize. An extension that needs
different error handling from finalize, can override this
method to handle errors.
Args:
trainer (Trainer): Trainer object that runs the training loop.
exc (Exception): arbitrary exception thrown during update loop.
tb (traceback): traceback object of the exception
"""
pass
def serialize(self, serializer):
"""Serializes the extension state.
It is called when a trainer that owns this extension is serialized. It
serializes nothing by default.
"""
pass
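# An illustrative sketch (not part of the original source): a minimal
# Extension subclass; the class name is hypothetical.
class _ElapsedTimePrinter(Extension):
    trigger = 1, 'epoch'
    priority = PRIORITY_READER
    def __call__(self, trainer):
        # Report how long the training has been running so far.
        print('elapsed: {:.1f} sec'.format(trainer.elapsed_time))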
def make_extension(trigger=None, default_name=None, priority=None,
finalizer=None, initializer=None, on_error=None, **kwargs):
"""Decorator to make given functions into trainer extensions.
This decorator just adds some attributes to a given function. The value of
the attributes are given by the arguments of this decorator.
See :class:`Extension` for details of trainer extensions. Most of the
default values of arguments also follow those for this class.
Args:
trigger: Default trigger of the extension.
default_name: Default name of the extension. The name of a given
function is used by default.
priority (int): Default priority of the extension.
finalizer: Finalizer function of this extension. It is
called at the end of the training loop.
initializer: Initializer function of this extension. It is called at
the beginning of the training loop.
on_error: Error handler callback function of this extension. It is
called after an error is raised during the trainer loop.
"""
if kwargs:
msg = ('invoke_before_training has been removed since Chainer v2.0.0. '
'Use initializer= instead.')
argument.check_unexpected_kwargs(kwargs, invoke_before_training=msg)
argument.assert_kwargs_empty(kwargs)
if trigger is None:
trigger = Extension.trigger
if priority is None:
priority = Extension.priority
def decorator(ext):
ext.trigger = trigger
ext.default_name = default_name or ext.__name__
ext.priority = priority
ext.finalize = finalizer
ext.on_error = on_error
ext.initialize = initializer
return ext
return decorator
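# The same behavior written with the decorator instead of subclassing
# (an illustrative sketch, not part of the original source).
@make_extension(trigger=(1, 'epoch'), priority=PRIORITY_READER)
def _print_elapsed_time(trainer):
    print('elapsed: {:.1f} sec'.format(trainer.elapsed_time))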
| 6,662
| 36.644068
| 79
|
py
|
chainer
|
chainer-master/chainer/training/trigger.py
|
# For backward compatibility
from chainer.training.triggers.interval_trigger import IntervalTrigger # NOQA
from chainer.training.util import _never_fire_trigger # NOQA
from chainer.training.util import get_trigger # NOQA
| 224
| 44
| 78
|
py
|
chainer
|
chainer-master/chainer/training/updater.py
|
from chainer.training._updater import Updater # NOQA
# For backward compatibility
from chainer.training.updaters.parallel_updater import ParallelUpdater # NOQA
from chainer.training.updaters.standard_updater import StandardUpdater # NOQA
| 242
| 39.5
| 78
|
py
|
chainer
|
chainer-master/chainer/training/util.py
|
from chainer.training.triggers import interval_trigger
def get_trigger(trigger):
"""Gets a trigger object.
Trigger object is a callable that accepts a
:class:`~chainer.training.Trainer` object as an argument and returns a
boolean value. When it returns True, various kinds of events can occur
depending on the context in which the trigger is used. For example, if the
trigger is passed to the :class:`~chainer.training.Trainer` as the `stop
trigger`, the training loop breaks when the trigger returns True. If the
trigger is passed to the :meth:`~chainer.training.Trainer.extend` method of
a trainer, then the registered extension is invoked only when the trigger
returns True.
This function returns a trigger object based on the argument.
If ``trigger`` is already a callable, it just returns the trigger. If
``trigger`` is ``None``, it returns a trigger that never fires. Otherwise,
it passes the value to :class:`~chainer.training.triggers.IntervalTrigger`.
Args:
trigger: Trigger object. It can be either an already built trigger
object (i.e., a callable object that accepts a trainer object and
            returns a bool value), or a tuple. In the latter case, the tuple
            is passed to :class:`~chainer.training.triggers.IntervalTrigger`.
Returns:
``trigger`` if it is a callable, otherwise a
:class:`~chainer.training.triggers.IntervalTrigger`
object made from ``trigger``.
"""
if callable(trigger):
return trigger
elif trigger is None:
return _never_fire_trigger
else:
return interval_trigger.IntervalTrigger(*trigger)
def _never_fire_trigger(trainer):
return False
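# An illustrative sketch (not part of the original source) of the three
# accepted forms of the ``trigger`` argument.
def _demo_get_trigger():
    t1 = get_trigger((100, 'iteration'))    # IntervalTrigger(100, 'iteration')
    t2 = get_trigger(None)                  # a trigger that never fires
    t3 = get_trigger(lambda trainer: True)  # callables are returned unchanged
    return t1, t2, t3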
| 1,741
| 38.590909
| 79
|
py
|
chainer
|
chainer-master/chainer/training/_updater.py
|
class Updater(object):
"""Interface of updater objects for trainers.
:class:`~chainer.training.Updater` implements a training iteration
as :meth:`update`. Typically, the updating iteration proceeds as follows.
- Fetch a minibatch from :mod:`~chainer.dataset`
via :class:`~chainer.dataset.Iterator`.
- Run forward and backward process of :class:`~chainer.Chain`.
- Update parameters according to their :class:`~chainer.UpdateRule`.
The first line is processed by
:meth:`Iterator.__next__ <chainer.dataset.Iterator.__next__>`.
The second and third are processed by
:meth:`Optimizer.update <chainer.Optimizer.update>`.
Users can also implement their original updating iteration by overriding
:meth:`Updater.update <chainer.training.Updater.update>`.
"""
def connect_trainer(self, trainer):
"""Connects the updater to the trainer that will call it.
The typical usage of this method is to register additional links to the
reporter of the trainer. This method is called at the end of the
initialization of :class:`~chainer.training.Trainer`. The default
implementation does nothing.
Args:
trainer (~chainer.training.Trainer): Trainer object to which the
updater is registered.
"""
pass
def finalize(self):
"""Finalizes the updater object.
This method is called at the end of training loops. It should finalize
each dataset iterator used in this updater.
"""
raise NotImplementedError
def get_optimizer(self, name):
"""Gets the optimizer of given name.
Updater holds one or more optimizers with names. They can be retrieved
by this method.
Args:
name (str): Name of the optimizer.
Returns:
~chainer.Optimizer: Optimizer of the name.
"""
raise NotImplementedError
def get_all_optimizers(self):
"""Gets a dictionary of all optimizers for this updater.
Returns:
dict: Dictionary that maps names to optimizers.
"""
raise NotImplementedError
def update(self):
"""Updates the parameters of the target model.
This method implements an update formula for the training task,
including data loading, forward/backward computations, and actual
updates of parameters.
This method is called once at each iteration of the training loop.
"""
raise NotImplementedError
def serialize(self, serializer):
"""Serializes the current state of the updater object."""
raise NotImplementedError
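# An illustrative sketch (not part of the original source): the shape of a
# minimal concrete Updater. Real code would usually use
# chainer.training.updaters.StandardUpdater instead.
class _CountingUpdater(Updater):
    """Toy updater that holds one optimizer and only counts iterations."""
    def __init__(self, optimizer):
        self._optimizer = optimizer
        self.iteration = 0
    def connect_trainer(self, trainer):
        pass
    def finalize(self):
        pass
    def get_optimizer(self, name):
        return self._optimizer
    def get_all_optimizers(self):
        return {'main': self._optimizer}
    def update(self):
        # A real updater would fetch a batch and run forward/backward here.
        self.iteration += 1
    def serialize(self, serializer):
        self.iteration = serializer('iteration', self.iteration)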
| 2,697
| 30.741176
| 79
|
py
|
chainer
|
chainer-master/chainer/training/__init__.py
|
from chainer.training import extensions # NOQA
from chainer.training import triggers # NOQA
from chainer.training import updaters # NOQA
from chainer.training import util # NOQA
# import classes and functions
from chainer.training.extension import Extension # NOQA
from chainer.training.extension import make_extension # NOQA
from chainer.training.extension import PRIORITY_EDITOR # NOQA
from chainer.training.extension import PRIORITY_READER # NOQA
from chainer.training.extension import PRIORITY_WRITER # NOQA
from chainer.training.trainer import Trainer # NOQA
from chainer.training.trigger import get_trigger # NOQA
from chainer.training.trigger import IntervalTrigger # NOQA
from chainer.training.updater import ParallelUpdater # NOQA
from chainer.training.updater import StandardUpdater # NOQA
from chainer.training.updater import Updater # NOQA
| 868
| 47.277778
| 62
|
py
|
chainer
|
chainer-master/chainer/training/trainer.py
|
import collections
import os
import sys
import time
import traceback
import six
from chainer import reporter as reporter_module
from chainer import serializer as serializer_module
from chainer.training import extension as extension_module
from chainer.training import trigger as trigger_module
from chainer.utils import argument
# Select the best-resolution timer function
try:
_get_time = time.perf_counter
except AttributeError:
if os.name == 'nt':
_get_time = time.clock
else:
_get_time = time.time
class _ExtensionEntry(object):
def __init__(self, extension, priority, trigger, call_before_training):
self.extension = extension
self.trigger = trigger
self.priority = priority
self.call_before_training = call_before_training
class Trainer(object):
"""The standard training loop in Chainer.
Trainer is an implementation of a training loop. Users can invoke the
training by calling the :meth:`run` method.
Each iteration of the training loop proceeds as follows.
- Update of the parameters. It includes the mini-batch loading, forward
and backward computations, and an execution of the update formula.
      These are all done by the updater object held by the trainer.
- Invocation of trainer extensions in the descending order of their
priorities. A trigger object is attached to each extension, and it
decides at each iteration whether the extension should be executed.
Trigger objects are callable objects that take the trainer object as the
argument and return a boolean value indicating whether the extension
should be called or not.
Extensions are callable objects that take the trainer object as the
argument. There are three ways to define custom extensions: inheriting the
:class:`Extension` class, decorating functions by :func:`make_extension`,
and defining any callable including lambda functions. See
:class:`Extension` for more details on custom extensions and how to
configure them.
Users can register extensions to the trainer by calling the :meth:`extend`
method, where some configurations can be added.
- Trigger object, which is also explained above. In most cases,
:class:`IntervalTrigger` is used, in which case users can simply specify
a tuple of the interval length and its unit, like
``(1000, 'iteration')`` or ``(1, 'epoch')``.
- The order of execution of extensions is determined by their priorities.
Extensions of higher priorities are invoked earlier. There are three
standard values for the priorities:
- ``PRIORITY_WRITER``. This is the priority for extensions that write
some records to the :attr:`observation` dictionary. It includes cases
that the extension directly adds values to the observation dictionary,
or the extension uses the :func:`chainer.report` function to report
values to the observation dictionary.
- ``PRIORITY_EDITOR``. This is the priority for extensions that edit the
:attr:`observation` dictionary based on already reported values.
- ``PRIORITY_READER``. This is the priority for extensions that only read
records from the :attr:`observation` dictionary. This is also suitable
for extensions that do not use the :attr:`observation` dictionary at
all.
The current state of the trainer object and objects handled by the trainer
can be serialized through the standard serialization protocol of Chainer.
It enables us to easily suspend and resume the training loop.
.. code-block:: python
>>> serializers.save_npz('my.trainer', trainer) # To suspend and save
>>> serializers.load_npz('my.trainer', trainer) # To load and resume
The :meth:`~chainer.training.extensions.snapshot` method makes regular
snapshots of the :class:`~chainer.training.Trainer` object during training.
.. note::
The serialization does not recover everything of the training loop. It
only recovers the states which change over the training (e.g.
parameters, optimizer states, the batch iterator state, extension
states, etc.). You must initialize the objects correctly before
deserializing the states.
On the other hand, it means that users can change the settings on
        deserialization. For example, the exit condition can be changed on
        deserialization, so users can train the model for some iterations,
        suspend it, and then resume it with a larger number of total
        iterations.
During the training, it also creates a :class:`~chainer.Reporter` object to
store observed values on each update. For each iteration, it creates a
fresh observation dictionary and stores it in the :attr:`observation`
attribute.
Links of the target model of each optimizer are registered to the reporter
object as observers, where the name of each observer is constructed as the
format ``<optimizer name><link name>``. The link name is given by the
:meth:`chainer.Link.namedlink` method, which represents the path to each
link in the hierarchy. Other observers can be registered by accessing the
reporter object via the :attr:`reporter` attribute.
The default trainer is `plain`, i.e., it does not contain any extensions.
Args:
updater (~chainer.training.Updater): Updater object. It defines how to
update the models.
stop_trigger: Trigger that determines when to stop the training loop.
If it is not callable, it is passed to :class:`IntervalTrigger`.
out: Output directory.
extensions: Extensions registered to the trainer.
Attributes:
updater: The updater object for this trainer.
stop_trigger: Trigger that determines when to stop the training loop.
The training loop stops at the iteration on which this trigger
returns ``True``.
observation: Observation of values made at the last update. See the
:class:`Reporter` class for details.
out: Output directory.
reporter: Reporter object to report observed values.
"""
def __init__(self, updater, stop_trigger=None, out='result',
extensions=None):
self.updater = updater
self.stop_trigger = trigger_module.get_trigger(stop_trigger)
self.observation = {}
self.out = out
if extensions is None:
extensions = []
reporter = reporter_module.Reporter()
for name, optimizer in six.iteritems(updater.get_all_optimizers()):
reporter.add_observer(name, optimizer.target)
reporter.add_observers(
name, optimizer.target.namedlinks(skipself=True))
self.reporter = reporter
self._done = False
self._extensions = collections.OrderedDict()
self._start_at = None
self._snapshot_elapsed_time = 0.0
self._final_elapsed_time = None
updater.connect_trainer(self)
for ext in extensions:
self.extend(ext)
@property
def is_before_training(self):
"""Flag that represents if training has started or not.
``True`` represents 'before training' and
``False`` represents 'during/after training'.
This flag is supposed to be used in :meth:`Extension.__call__`
(e.g., :meth:`PlotReport.__call__`) to decide to execute its operation
or not. This additional condition is necessary since
``Extension._trigger(trainer)`` is always ``False`` before training
and cannot be used.
"""
return self.updater.iteration == 0
@property
def elapsed_time(self):
"""Total time used for the training.
The time is in seconds. If the training is resumed from snapshot, it
includes the time of all the previous training to get the current
state of the trainer.
"""
if self._done:
return self._final_elapsed_time
if self._start_at is None:
raise RuntimeError('training has not been started yet')
return _get_time() - self._start_at + self._snapshot_elapsed_time
def extend(self, extension, name=None, trigger=None, priority=None,
*, call_before_training=False, **kwargs):
"""Registers an extension to the trainer.
:class:`Extension` is a callable object which is called after each
update unless the corresponding trigger object decides to skip the
iteration. The order of execution is determined by priorities:
extensions with higher priorities are called earlier in each iteration.
Extensions with the same priority are invoked in the order of
registrations.
        If two or more extensions with the same name are registered, suffixes
        are added to the names of the second and subsequent extensions. The
        suffix is ``_N``, where N is the ordinal of the extension.
See :class:`Extension` for the interface of extensions.
Args:
extension: Extension to register.
            name (str): Name of the extension. If it is omitted, the
                :attr:`Extension.name` attribute of the extension is used,
                or the :attr:`Extension.default_name` attribute of the
                extension if ``name`` is set to ``None`` or is undefined.
Note that the name would be suffixed by an ordinal in case of
duplicated names as explained above.
trigger (tuple or Trigger): Trigger object that determines when to
invoke the extension. If it is ``None``, ``extension.trigger``
is used instead. If it is ``None`` and the extension does not
have the trigger attribute, the extension is triggered at every
iteration by default. If the trigger is not callable, it is
passed to :class:`IntervalTrigger` to build an interval
trigger.
call_before_training (bool): Flag to call extension before
training. Default is ``False``.
priority (int): Invocation priority of the extension. Extensions
are invoked in the descending order of priorities in each
iteration. If this is ``None``, ``extension.priority`` is used
instead.
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs,
invoke_before_training='invoke_before_training has been '
'removed since Chainer v2.0.0. Use initializer= instead.')
argument.assert_kwargs_empty(kwargs)
if name is None:
name = getattr(extension, 'name', None)
if name is None:
name = getattr(extension, 'default_name', None)
if name is None:
name = getattr(extension, '__name__', None)
if name is None:
raise TypeError('name is not given for the extension')
if name == 'training':
raise ValueError(
'the name "training" is prohibited as an extension name')
if trigger is None:
trigger = getattr(extension, 'trigger', (1, 'iteration'))
trigger = trigger_module.get_trigger(trigger)
if priority is None:
priority = getattr(
extension, 'priority', extension_module.PRIORITY_READER)
modified_name = name
ordinal = 0
while modified_name in self._extensions:
ordinal += 1
modified_name = '%s_%d' % (name, ordinal)
extension.name = modified_name
self._extensions[modified_name] = _ExtensionEntry(
extension, priority, trigger, call_before_training)
def get_extension(self, name):
"""Returns the extension of a given name.
Args:
name (str): Name of the extension.
Returns:
Extension.
"""
extensions = self._extensions
if name in extensions:
return extensions[name].extension
else:
raise ValueError('extension %s not found' % name)
def run(self, show_loop_exception_msg=True):
"""Executes the training loop.
This method is the core of ``Trainer``. It executes the whole loop of
training the models.
        Note that this method cannot be run multiple times for one trainer
        object.
"""
if self._done:
raise RuntimeError('cannot run training loop multiple times')
try:
os.makedirs(self.out)
except OSError:
pass
# sort extensions by priorities
extension_order = sorted(
self._extensions.keys(),
key=lambda name: self._extensions[name].priority, reverse=True)
extensions = [(name, self._extensions[name])
for name in extension_order]
self._start_at = _get_time()
# invoke initializer of each extension
for _, entry in extensions:
initializer = getattr(entry.extension, 'initialize', None)
finished = getattr(entry.trigger, 'finished', False)
if initializer and not finished:
initializer(self)
update = self.updater.update
reporter = self.reporter
stop_trigger = self.stop_trigger
# call extensions before training loop
if self.is_before_training:
self.observation = {}
with reporter.scope(self.observation):
for name, entry in extensions:
if entry.call_before_training:
entry.extension(self)
# main training loop
try:
while not stop_trigger(self):
self.observation = {}
with reporter.scope(self.observation):
update()
for name, entry in extensions:
if entry.trigger(self):
entry.extension(self)
except Exception as e:
if show_loop_exception_msg:
# Show the exception here, as it will appear as if chainer
# hanged in case any finalize method below deadlocks.
f = sys.stderr
f.write('Exception in main training loop: {}\n'.format(e))
f.write('Traceback (most recent call last):\n')
traceback.print_tb(sys.exc_info()[2])
f.write('Will finalize trainer extensions and updater before '
'reraising the exception.\n')
            # In Python 2, sys.exc_info() is updated if any following
            # exception happens even if it's in a limited scope (like
            # the try-except clause below). Thus the exception from the
            # main loop is preserved here.
exc_info = sys.exc_info()
for _, entry in extensions:
handler = getattr(entry.extension, 'on_error', None)
if handler:
try:
                        # It is guaranteed that all handlers are called,
                        # but exceptions thrown by those handlers are
                        # just printed and ignored, as are their
                        # return values.
handler(self, e, sys.exc_info()[2])
except Exception as he:
f.write('Exception in error handler: {}\n'.format(he))
f.write('Traceback (most recent call last):\n')
traceback.print_tb(sys.exc_info()[2])
six.reraise(*exc_info)
finally:
for _, entry in extensions:
finalize = getattr(entry.extension, 'finalize', None)
if finalize:
finalize()
self.updater.finalize()
self._final_elapsed_time = self.elapsed_time
self._done = True
def serialize(self, serializer):
self.updater.serialize(serializer['updater'])
if hasattr(self.stop_trigger, 'serialize'):
self.stop_trigger.serialize(serializer['stop_trigger'])
s = serializer['extensions']
t = serializer['extension_triggers']
for name, entry in six.iteritems(self._extensions):
if hasattr(entry.extension, 'serialize'):
entry.extension.serialize(s[name])
if hasattr(entry.trigger, 'serialize'):
entry.trigger.serialize(t[name])
if isinstance(serializer, serializer_module.Serializer):
serializer('_snapshot_elapsed_time', self.elapsed_time)
else:
self._snapshot_elapsed_time = serializer(
'_snapshot_elapsed_time', 0.0)
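# An illustrative sketch (not part of the original source) of assembling a
# trainer. ``model`` and ``train_iter`` are hypothetical stand-ins for a
# prepared chain and dataset iterator.
def _demo_trainer(model, train_iter):
    from chainer import optimizers
    from chainer.training import extensions
    from chainer.training.updaters.standard_updater import StandardUpdater
    optimizer = optimizers.SGD(lr=0.01)
    optimizer.setup(model)
    updater = StandardUpdater(train_iter, optimizer)
    trainer = Trainer(updater, stop_trigger=(20, 'epoch'), out='result')
    trainer.extend(extensions.LogReport(), trigger=(1, 'epoch'))
    trainer.extend(extensions.PrintReport(['epoch', 'main/loss']))
    trainer.run()
    return trainer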
| 16,918
| 40.775309
| 79
|
py
|
chainer
|
chainer-master/chainer/training/updaters/standard_updater.py
|
import warnings
import six
import chainer
from chainer.backends import cuda
from chainer.dataset import convert
from chainer.dataset import iterator as iterator_module
from chainer import device_resident
from chainer.training import _updater
from chainer.utils import argument
class StandardUpdater(_updater.Updater):
"""StandardUpdater(\
iterator, optimizer, converter=convert.concat_examples, device=None, \
loss_func=None, loss_scale=None, auto_new_epoch=True, *, input_device=None)
Standard implementation of Updater.
This is the standard implementation of :class:`~chainer.training.Updater`.
It accepts one or more training datasets and one or more optimizers.
The default update routine assumes that there is only one training dataset
and one optimizer. Users can override this update routine by inheriting
this class and overriding the :meth:`update_core` method. Each batch is
    converted to input arrays by :func:`chainer.dataset.concat_examples` by
    default, which can also be set manually via the ``converter`` argument.
Args:
iterator: Dataset iterator for the training dataset. It can also be a
dictionary that maps strings to iterators.
If this is just an iterator, then the
iterator is registered by the name ``'main'``.
optimizer: Optimizer to update parameters. It can also be a dictionary
that maps strings to optimizers.
If this is just an optimizer, then the optimizer is
registered by the name ``'main'``.
converter: Converter function to build input arrays. Each batch
extracted by the main iterator and the ``device`` option are passed
to this function. :func:`chainer.dataset.concat_examples` is used
by default.
        device (device specifier): Device to which the model is sent.
If ``None``, the device of the model will stay unchanged.
loss_func: Loss function. The target link of the main optimizer is used
by default.
        loss_scale (float): Loss scaling factor. Loss scaling is a useful
            technique to mitigate the vanishing gradient issue that tends to
            happen when a low precision data type like float16 is used during
            training. If you set a loss scaling factor, gradients of loss
            values are multiplied by the factor before backprop starts. The
            factor is propagated to all gradients in a computational graph
            along the backprop. The gradients of parameters are divided by
            the factor just before the parameters are updated.
auto_new_epoch (bool): If ``True``,
:meth:`~chainer.Optimizer.new_epoch` of the main optimizer is
automatically called when the ``is_new_epoch`` attribute of the
main iterator is ``True``.
input_device (device specifier):
Device to which the training data is sent.
If ``input_device`` is omitted, it will match the ``device``
argument.
Attributes:
converter: Converter function.
loss_func: Loss function. If it is ``None``, the target link of the
main optimizer is used instead.
device: Device to which the model is sent.
input_device: Device to which the training data is sent.
iteration: Current number of completed updates.
auto_new_epoch: If ``True``, :meth:`~chainer.Optimizer.new_epoch` is
automatically called by :meth:`update_core`. In this case, the
:attr:`~chainer.Optimizer.use_auto_new_epoch` attribute of each
optimizer is also set to ``True``. If :meth:`update_core` is
overridden, the implementation should correctly call
:meth:`~chainer.Optimizer.new_epoch` of each optimizer.
"""
def __init__(self, iterator, optimizer, converter=convert.concat_examples,
device=None, loss_func=None, loss_scale=None,
auto_new_epoch=True, **kwargs):
input_device, = argument.parse_kwargs(
kwargs, ('input_device', None))
if device is not None:
device = chainer.get_device(device)
# input_device falls back to device
if input_device is None:
input_device = device
else:
input_device = chainer.get_device(input_device)
if isinstance(iterator, iterator_module.Iterator):
iterator = {'main': iterator}
self._iterators = iterator
if not isinstance(optimizer, dict):
optimizer = {'main': optimizer}
self._optimizers = optimizer
# Transfer the model
if device is not None:
for optimizer in six.itervalues(self._optimizers):
if device.xp is cuda.cupy:
# Do not transfer between different cupy devices.
# Detect GPU-to-GPU transfer and raise FutureWarning.
# TODO(niboshi): Eventually replace it with to_device.
thread_local = device_resident._thread_local
has_gpu_to_gpu = False
try:
# Turn on GPU-to-GPU detection
thread_local.flag_gpu_to_gpu = False
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
message='to_gpu is deprecated.',
category=DeprecationWarning)
optimizer.target.to_gpu(device.device.id)
has_gpu_to_gpu = thread_local.flag_gpu_to_gpu
finally:
# Turn off GPU-to-GPU detection
thread_local.flag_gpu_to_gpu = None
if has_gpu_to_gpu:
warnings.warn(
'''\
Transfer between @cupy devices was detected and skipped. \
StandardUpdater normally transfers the model to the specified device, \
except between @cupy devices. \
That is, if a part of the model is on the @cupy:n device and the specified \
device is the @cupy:m device, that part of the model will be left on the \
@cupy:n device. This behavior is planned to be changed in the near future. \
After that, the model will be transferred to the specified device regardless \
of device combination. \
If you want to keep the model device but only want to transfer the input data \
to a given device, specify the 'input_device' argument instead and leave the \
'device' argument unspecified.
''',
FutureWarning)
else:
optimizer.target.to_device(device)
self.converter = converter
self.loss_func = loss_func
self.iteration = 0
self._device = device
self._input_device = input_device
self.loss_scale = loss_scale
if loss_scale is not None:
for optimizer in six.itervalues(self._optimizers):
optimizer.set_loss_scale(loss_scale)
self.auto_new_epoch = auto_new_epoch
if auto_new_epoch:
for o in six.itervalues(self._optimizers):
o.use_auto_new_epoch = True
@property
def device(self):
return self._device
@property
def input_device(self):
return self._input_device
@property
def epoch(self):
return self._iterators['main'].epoch
@property
def epoch_detail(self):
return self._iterators['main'].epoch_detail
@property
def previous_epoch_detail(self):
return self._iterators['main'].previous_epoch_detail
@property
def is_new_epoch(self):
return self._iterators['main'].is_new_epoch
def finalize(self):
"""Finalizes the updater object.
This method calls the `finalize` method of each iterator that
this updater has.
It is called at the end of training loops.
"""
for iterator in six.itervalues(self._iterators):
iterator.finalize()
def get_optimizer(self, name):
"""Gets the optimizer of given name.
Args:
name (str): Name of the optimizer.
Returns:
~chainer.Optimizer: Corresponding optimizer.
"""
return self._optimizers[name]
def get_all_optimizers(self):
"""Gets a dictionary of all optimizers for this updater.
Returns:
dict: Dictionary that maps names to optimizers.
"""
return dict(self._optimizers)
def get_iterator(self, name):
"""Gets the dataset iterator of given name.
Args:
name (str): Name of the dataset iterator.
Returns:
~chainer.dataset.Iterator: Corresponding dataset iterator.
"""
return self._iterators[name]
def update(self):
"""Updates the parameters of the target model.
This method implements an update formula for the training task,
including data loading, forward/backward computations, and actual
updates of parameters.
This method is called once at each iteration of the training loop.
"""
self.update_core()
self.iteration += 1
def update_core(self):
iterator = self._iterators['main']
batch = iterator.next()
in_arrays = convert._call_converter(
self.converter, batch, self.input_device)
optimizer = self._optimizers['main']
loss_func = self.loss_func or optimizer.target
if isinstance(in_arrays, tuple):
optimizer.update(loss_func, *in_arrays)
elif isinstance(in_arrays, dict):
optimizer.update(loss_func, **in_arrays)
else:
optimizer.update(loss_func, in_arrays)
if self.auto_new_epoch and iterator.is_new_epoch:
optimizer.new_epoch(auto=True)
def serialize(self, serializer):
"""Serializes the current state of the updater object."""
for name, iterator in six.iteritems(self._iterators):
iterator.serialize(serializer['iterator:' + name])
for name, optimizer in six.iteritems(self._optimizers):
optimizer.serialize(serializer['optimizer:' + name])
optimizer.target.serialize(serializer['model:' + name])
self.iteration = serializer('iteration', self.iteration)
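# A complete, runnable one-optimizer sketch (illustrative, not part of the
# original source; the tiny random regression dataset is a stand-in).
def _demo_standard_updater():
    import numpy
    import chainer.functions as F
    import chainer.links as L
    from chainer import iterators, optimizers
    class _MSERegressor(chainer.Chain):
        def __init__(self):
            super(_MSERegressor, self).__init__()
            with self.init_scope():
                self.fc = L.Linear(3, 1)
        def forward(self, x, t):
            return F.mean_squared_error(self.fc(x), t)
    data = [(numpy.random.rand(3).astype(numpy.float32),
             numpy.random.rand(1).astype(numpy.float32))
            for _ in range(16)]
    model = _MSERegressor()
    optimizer = optimizers.SGD()
    optimizer.setup(model)
    train_iter = iterators.SerialIterator(data, batch_size=4)
    updater = StandardUpdater(train_iter, optimizer)
    updater.update()  # one iteration: load a batch, forward/backward, update
    return updater.iteration  # -> 1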
| 10,516
| 37.665441
| 79
|
py
|
chainer
|
chainer-master/chainer/training/updaters/multiprocess_parallel_updater.py
|
import multiprocessing
import warnings
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer.dataset import convert
from chainer import reporter
from chainer.training.updaters import standard_updater
try:
from cupy.cuda import nccl
_available = True
except Exception:
_available = False
class _Worker(multiprocessing.Process):
def __init__(self, proc_id, pipe, master):
super(_Worker, self).__init__()
self.proc_id = proc_id
self.pipe = pipe
self.converter = master.converter
self.model = master._master
self.device = master._devices[proc_id]
self.iterator = master._mpu_iterators[proc_id]
self.n_devices = len(master._devices)
def setup(self):
_, comm_id = self.pipe.recv()
self.comm = nccl.NcclCommunicator(self.n_devices, comm_id,
self.proc_id)
self.model.to_device(self.device)
self.reporter = reporter.Reporter()
self.reporter.add_observer('main', self.model)
self.reporter.add_observers('main',
self.model.namedlinks(skipself=True))
def run(self):
self.device.use()
self.setup()
while True:
job, data = self.pipe.recv()
if job == 'finalize':
self.device.device.synchronize()
break
if job == 'update':
# For reducing memory
self.model.cleargrads()
batch = self.converter(self.iterator.next(), self.device)
with self.reporter.scope({}): # pass dummy observation
loss = _calc_loss(self.model, batch)
self.model.cleargrads()
loss.backward()
del loss
gg = gather_grads(self.model)
nccl_data_type = _get_nccl_data_type(gg.dtype)
null_stream = cuda.Stream.null
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM, 0,
null_stream.ptr)
del gg
self.model.cleargrads()
gp = gather_params(self.model)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0,
null_stream.ptr)
scatter_params(self.model, gp)
del gp
class MultiprocessParallelUpdater(standard_updater.StandardUpdater):
"""Implementation of a multiprocess parallel GPU Updater.
This is an implementation of :class:`Updater` that uses multiple GPUs
with multi-process data parallelism. It uses Nvidia NCCL for communication
between multiple GPUs.
It behaves similarly to
:class:`~chainer.training.updaters.StandardUpdater`.
The update routine is modified to support data-parallel
computation on multiple GPUs in one machine.
It is based on synchronous parallel SGD: it
parallelizes the gradient computation over a mini-batch, and updates the
parameters only in the main device.
It does not transfer the values collected by :class:`Reporter` in the sub
devices to the main device. So you can only see the reported values in
the main device.
Args:
        iterators: List of dataset iterators for the training dataset. The
            number of iterators must be the same as the number of GPUs you
            use.
optimizer: Optimizer to update parameters. The model should be attached
to the optimizer.
converter: Converter function to build input arrays. Each batch
extracted by the iterator is split equally between the devices and
then passed with corresponding ``device`` option to this function.
:func:`~chainer.dataset.concat_examples` is used by default.
devices: Dictionary or list of devices to which the training data is
sent. The master device will be the first one in the list or the
value attached to the key ``'main'``.
auto_new_epoch (bool): If ``True``,
:meth:`~chainer.Optimizer.new_epoch` of the main optimizer is
automatically called when the ``is_new_epoch`` attribute of the
main iterator is ``True``.
"""
def __init__(self, iterators, optimizer, converter=convert.concat_examples,
devices=None, auto_new_epoch=True):
if not MultiprocessParallelUpdater.available():
raise Exception(
'NCCL is not enabled. MultiprocessParallelUpdater '
'requires NCCL.\n'
'Please reinstall CuPy after you install NCCL.\n'
'(see https://docs-cupy.chainer.org/en/latest/install.html)')
try:
cuda.cupy.cuda.driver.ctxGetCurrent()
_cuda_initialized = True
except cuda.cupy.cuda.driver.CUDADriverError:
# The context is not initialized, it will be fine.
_cuda_initialized = False
if (_cuda_initialized and multiprocessing.get_start_method()
not in ('spawn', 'forkserver')):
raise RuntimeError(
'The CUDA context has been already initialized. '
'MultiprocessParallelUpdater assumes the context is '
'uninitialized. Please do not call CUDA API before '
'MultiprocessParallelUpdater creates processes '
'or use multiprocessing.set_start_method with '
'\'spawn\' or \'forkserver\' as arguments.')
assert len(iterators) == len(devices)
for iterator in iterators[1:]:
assert len(iterator.dataset) == len(iterators[0].dataset)
# Correct optimizer parameters for new minibatch size
optim = optimizer.__class__.__name__
if optim in ('Adam', 'AdaGrad', 'RMSprop'):
optimizer.eps *= len(devices)
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif optim in ('RMSpropGraves', 'AdaDelta'):
optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif hasattr(optimizer, 'lr'):
optimizer.lr /= len(devices)
warnings.warn('optimizer.lr is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.lr))
super(MultiprocessParallelUpdater, self).__init__(
iterator=iterators[0],
optimizer=optimizer,
converter=converter,
auto_new_epoch=auto_new_epoch,
)
if isinstance(devices, dict):
devices = devices.copy()
main = devices.pop('main')
devices = list(six.itervalues(devices))
devices = [main] + devices
elif isinstance(devices, (list, tuple)):
devices = list(devices)
else:
raise ValueError(
'devices argument should be either dict, list or tuple,'
' but {} was given.'.format(type(devices)))
if devices is None or any(device is None for device in devices):
raise ValueError('GPU devices must be specified.')
self._master = optimizer.target
self._devices = [chainer.get_device(device) for device in devices]
self._mpu_iterators = iterators
self._initialized = False
self._pipes = []
self._workers = []
self.comm = None
@staticmethod
def available():
return _available
def _send_message(self, message):
for pipe in self._pipes:
pipe.send(message)
def setup_workers(self):
if self._initialized:
return
self._initialized = True
self._master.cleargrads()
for i in six.moves.range(1, len(self._devices)):
pipe, worker_end = multiprocessing.Pipe()
worker = _Worker(i, worker_end, self)
worker.start()
self._workers.append(worker)
self._pipes.append(pipe)
with chainer.using_device(self._devices[0]):
self._master.to_device(self._devices[0])
if len(self._devices) > 1:
comm_id = nccl.get_unique_id()
self._send_message(('set comm_id', comm_id))
self.comm = nccl.NcclCommunicator(
len(self._devices), comm_id, 0)
def update_core(self):
self.setup_workers()
self._send_message(('update', None))
with chainer.using_device(self._devices[0]):
# For reducing memory
self._master.cleargrads()
optimizer = self.get_optimizer('main')
iterator = self.get_iterator('main')
batch = iterator.next()
batch = self.converter(batch, self._devices[0])
loss = _calc_loss(self._master, batch)
self._master.cleargrads()
loss.backward()
# NCCL: reduce grads
null_stream = cuda.Stream.null
if self.comm is not None:
gg = gather_grads(self._master)
nccl_data_type = _get_nccl_data_type(gg.dtype)
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM,
0, null_stream.ptr)
scatter_grads(self._master, gg)
del gg
optimizer.update()
if self.comm is not None:
gp = gather_params(self._master)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type,
0, null_stream.ptr)
if self.auto_new_epoch and iterator.is_new_epoch:
optimizer.new_epoch(auto=True)
def finalize(self):
self._send_message(('finalize', None))
for worker in self._workers:
worker.join()
super(MultiprocessParallelUpdater, self).finalize()
def _calc_loss(model, in_arrays):
if isinstance(in_arrays, tuple):
return model(*in_arrays)
elif isinstance(in_arrays, dict):
return model(**in_arrays)
else:
return model(in_arrays)
def size_num_grads(link):
"""Count total size of all gradient arrays of a given link
Args:
link (chainer.link.Link): Target link object.
"""
size = 0
num = 0
for param in link.params():
if param.size == 0:
continue
size += param.size
num += 1
return size, num
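# An illustrative sketch (not part of the original source): counts for a
# small link; this part runs on CPU because it only inspects array sizes.
def _demo_size_num_grads():
    import chainer.links as L
    link = L.Linear(3, 2)     # W: (2, 3) -> 6 elements, b: (2,) -> 2
    size, num = size_num_grads(link)
    return size, num          # -> (8, 2)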
def _memcpy_gather():
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info',
'raw float32 dst',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_dst = i;
int i_src = i;
if (id > 0) i_src -= info[id];
dst[i_dst] = 0;
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *src = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = src[i_src];
}
else { // fp16
float16 *src = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float>(src[i_src]);
}
}
id_pre = id;
''',
'_memcpy_gather',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _gather(link, target):
size, num = size_num_grads(link)
ptrs = numpy.empty(num, dtype=numpy.uint64)
dtypes = numpy.empty(num, dtype=numpy.int8)
info = numpy.empty(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
d = getattr(param, target)
if d is not None:
ptrs[i] = d.data.ptr
dtypes[i] = 0 # fp32
if param.dtype == numpy.float16:
dtypes[i] = 1 # fp16
info[i + 1] = info[i] + param.size
i += 1
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_gather()(ptrs, dtypes, info, size=size)
def gather_grads(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
if link.xp is numpy:
raise RuntimeError('gather_grads works only on GPU.')
return _gather(link, 'grad')
def gather_params(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
if link.xp is numpy:
raise RuntimeError('Link.gather_params works only on GPU.')
return _gather(link, 'data')
def _memcpy_scatter():
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info, raw float32 array',
'',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_src = i;
int i_dst = i;
if (id > 0) i_dst -= info[id];
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *dst = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = array[i_src];
}
else { // fp16
float16 *dst = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float16>(array[i_src]);
}
}
id_pre = id;
''',
'_memcpy_scatter',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _scatter(link, array, target):
size, num = size_num_grads(link)
ptrs = numpy.zeros(num, dtype=numpy.uint64)
dtypes = numpy.zeros(num, dtype=numpy.int8)
info = numpy.zeros(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
d = getattr(param, target)
if d is None:
d = cuda.cupy.zeros(param.shape, dtype=param.dtype)
setattr(param, target, d)
ptrs[i] = d.data.ptr
dtypes[i] = 0 # fp32
if param.dtype == numpy.float16:
dtypes[i] = 1 # fp16
info[i + 1] = info[i] + param.size
i += 1
if i != num:
        raise ValueError('the number of non-empty parameters does not match')
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_scatter()(ptrs, dtypes, info, array, size=size)
def scatter_grads(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_grads()
"""
return _scatter(link, array, 'grad')
def scatter_params(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_params()
"""
return _scatter(link, array, 'data')
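# Editor's illustrative sketch, not part of the original module: the
# gather/scatter pair supports flat, all-reduce style communication.
# ``src_link`` and ``dst_link`` are hypothetical GPU replicas of a model.
def _example_param_round_trip(src_link, dst_link):
    flat = gather_params(src_link)   # one flat float32 copy of all params
    # ... a broadcast or all-reduce of ``flat`` would happen here ...
    scatter_params(dst_link, flat)   # write the values back into dst_link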
def _get_nccl_data_type(dtype):
"""Get data type for NCCL"""
if dtype == numpy.float32:
nccl_data_type = nccl.NCCL_FLOAT
elif dtype == numpy.float16:
nccl_data_type = nccl.NCCL_HALF
elif dtype == numpy.float64:
nccl_data_type = nccl.NCCL_DOUBLE
else:
raise RuntimeError('Unexpected data type:{}'.format(dtype))
return nccl_data_type
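# Editor's sketch, assuming CuPy's NCCL binding is importable as the
# module-level name ``nccl`` referenced above: a float32 buffer produced
# by ``gather_grads`` maps to ``nccl.NCCL_FLOAT``.
def _example_nccl_data_type(link):
    grads = gather_grads(link)               # float32 by construction
    return _get_nccl_data_type(grads.dtype)  # == nccl.NCCL_FLOAT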
| 16,699
| 32.737374
| 79
|
py
|
chainer
|
chainer-master/chainer/training/updaters/__init__.py
|
from chainer.training.updaters.multiprocess_parallel_updater import MultiprocessParallelUpdater # NOQA
from chainer.training.updaters.parallel_updater import ParallelUpdater # NOQA
from chainer.training.updaters.standard_updater import StandardUpdater # NOQA
| 262
| 64.75
| 103
|
py
|
chainer
|
chainer-master/chainer/training/updaters/parallel_updater.py
|
import copy
import six
import chainer
from chainer.dataset import convert
from chainer import function
from chainer.training.updaters import standard_updater
class ParallelUpdater(standard_updater.StandardUpdater):
"""Implementation of a parallel GPU Updater.
This is an implementation of :class:`Updater` that uses multiple GPUs.
It behaves similarly to
:class:`~chainer.training.updaters.StandardUpdater`.
The update routine is modified to support data-parallel computation
on multiple GPUs in one machine.
It is based on synchronous parallel SGD: it
parallelizes the gradient computation over a mini-batch, and updates the
parameters only in the main device.
Args:
iterator: Dataset iterator for the training dataset. It can also be a
dictionary that maps strings to iterators.
If this is just an iterator, then the
iterator is registered by the name ``'main'``.
optimizer: Optimizer to update parameters. It can also be a dictionary
that maps strings to optimizers.
If this is just an optimizer, then the optimizer is
registered by the name ``'main'``.
converter: Converter function to build input arrays. Each batch
extracted by the main iterator is split equally between the
devices and then passed with corresponding ``device`` option to
this function. :func:`~chainer.dataset.concat_examples` is used by
default.
models: Dictionary of models. The main model should be the same model
attached to the ``'main'`` optimizer.
devices: Dictionary of devices to which the training data is sent. The
devices should be arranged in a dictionary with the same structure
as ``models``.
loss_func: Loss function. The model is used as a loss function by
default.
        loss_scale (float): Loss scaling factor. Loss scaling is a useful
            technique to mitigate the vanishing gradient issue that tends to
            happen when a low precision data type like float16 is used during
            training. If you set a loss scaling factor, gradients of loss
            values are multiplied by the factor before backprop starts. The
            factor is propagated to all gradients in the computational graph
            along the backprop. The gradients of parameters are divided by
            the factor just before the parameters are updated.
auto_new_epoch (bool): If ``True``,
:meth:`~chainer.Optimizer.new_epoch` of the main optimizer is
automatically called when the ``is_new_epoch`` attribute of the
main iterator is ``True``.
"""
def __init__(self, iterator, optimizer, converter=convert.concat_examples,
models=None, devices=None, loss_func=None, loss_scale=None,
auto_new_epoch=True):
super(ParallelUpdater, self).__init__(
iterator=iterator,
optimizer=optimizer,
converter=converter,
loss_func=loss_func,
loss_scale=loss_scale,
auto_new_epoch=auto_new_epoch,
)
if models is None:
if devices is None:
raise ValueError('either models or devices must be specified')
names = list(six.iterkeys(devices))
try:
names.remove('main')
except ValueError:
raise KeyError('\'devices\' must contain a \'main\' key.')
models = {'main': optimizer.target}
for name in names:
model = copy.deepcopy(optimizer.target)
model.to_device(devices[name])
models[name] = model
optimizer.target.to_device(devices['main'])
self._devices = devices
self._models = models
def connect_trainer(self, trainer):
# Add observers for all (other) models.
model_main = self.get_optimizer('main').target
models_others = {
k: v for k, v in self._models.items() if v != model_main
}
for name, model in models_others.items():
trainer.reporter.add_observer(name, model)
def update_core(self):
optimizer = self.get_optimizer('main')
model_main = optimizer.target
models_others = {k: v for k, v in self._models.items()
if v is not model_main}
iterator = self.get_iterator('main')
batch = iterator.next()
#
        # Split the batch into sub-batches.
#
n = len(self._models)
in_arrays_list = {}
for i, key in enumerate(six.iterkeys(self._models)):
in_arrays_list[key] = self.converter(
batch[i::n], self._devices[key])
# For reducing memory
for model in six.itervalues(self._models):
model.cleargrads()
losses = []
for model_key, model in six.iteritems(self._models):
in_arrays = in_arrays_list[model_key]
loss_func = self.loss_func or model
with function.force_backprop_mode():
with chainer.using_device(self._devices[model_key]):
if isinstance(in_arrays, tuple):
loss = loss_func(*in_arrays)
elif isinstance(in_arrays, dict):
loss = loss_func(**in_arrays)
else:
loss = loss_func(in_arrays)
losses.append(loss)
# For _uninitialized_params
for model in six.itervalues(self._models):
model.cleargrads()
for loss in losses:
loss.backward(loss_scale=self.loss_scale)
for model in six.itervalues(models_others):
model_main.addgrads(model)
optimizer.update()
for model in six.itervalues(models_others):
model.copyparams(model_main)
if self.auto_new_epoch and iterator.is_new_epoch:
optimizer.new_epoch(auto=True)
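# Editor's usage sketch with a hypothetical model and iterator; it
# requires two CUDA devices. Gradients computed on the 'second' device
# are accumulated into the 'main' model before each update.
def _example_parallel_updater(train_iter, model):
    optimizer = chainer.optimizers.SGD()
    optimizer.setup(model)
    return ParallelUpdater(
        train_iter, optimizer,
        devices={'main': 0, 'second': 1})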
| 6,102
| 38.121795
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/snapshot_writers.py
|
import multiprocessing
import os
import shutil
import threading
from six.moves import queue
from chainer.serializers import npz
from chainer import utils
class Writer(object):
"""Base class of snapshot writers.
    :class:`~chainer.training.extensions.Snapshot` invokes ``__call__`` of
    this class every time it takes a snapshot.
This class determines how the actual saving function will be invoked.
.. seealso::
- :meth:`chainer.training.extensions.snapshot`
"""
def __init__(self):
self._post_save_hooks = []
def __call__(self, filename, outdir, target):
"""Invokes the actual snapshot function.
This method is invoked by a
:class:`~chainer.training.extensions.Snapshot` object every time it
takes a snapshot.
Args:
filename (str): Name of the file into which the serialized target
is saved. It is a concrete file name, i.e. not a pre-formatted
template string.
outdir (str): Output directory. Corresponds to
:py:attr:`Trainer.out <chainer.training.Trainer.out>`.
target (dict): Serialized object which will be saved.
"""
raise NotImplementedError
def __del__(self):
self.finalize()
def finalize(self):
"""Finalizes the wirter.
Like extensions in :class:`~chainer.training.Trainer`, this method
is invoked at the end of the training.
"""
pass
def save(self, filename, outdir, target, savefun, **kwds):
prefix = 'tmp' + filename
with utils.tempdir(prefix=prefix, dir=outdir) as tmpdir:
tmppath = os.path.join(tmpdir, filename)
savefun(tmppath, target)
shutil.move(tmppath, os.path.join(outdir, filename))
self._post_save()
def _add_cleanup_hook(self, hook_fun):
"""Adds cleanup hook function.
Technically, arbitrary user-defined hook can be called, but
this is intended for cleaning up stale snapshots.
Args:
hook_fun (callable): callable function to be called
right after save is done. It takes no arguments.
"""
self._post_save_hooks.append(hook_fun)
def _post_save(self):
for hook in self._post_save_hooks:
hook()
class SimpleWriter(Writer):
"""The most simple snapshot writer.
This class just passes the arguments to the actual saving function.
Args:
savefun: Callable object. It takes three arguments: the output file
path, the serialized dictionary object, and the optional keyword
arguments.
kwds: Keyword arguments for the ``savefun``.
.. seealso::
- :meth:`chainer.training.extensions.snapshot`
"""
def __init__(self, savefun=npz.save_npz, **kwds):
super(SimpleWriter, self).__init__()
self._savefun = savefun
self._kwds = kwds
def __call__(self, filename, outdir, target):
self.save(filename, outdir, target, self._savefun, **self._kwds)
class StandardWriter(Writer):
"""Base class of snapshot writers which use thread or process.
    This class creates a new thread or process every time ``__call__``
    is invoked.
Args:
savefun: Callable object. It takes three arguments: the output file
path, the serialized dictionary object, and the optional keyword
arguments.
kwds: Keyword arguments for the ``savefun``.
.. seealso::
- :meth:`chainer.training.extensions.snapshot`
"""
_started = False
_finalized = False
_worker = None
def __init__(self, savefun=npz.save_npz, **kwds):
super(StandardWriter, self).__init__()
self._savefun = savefun
self._kwds = kwds
self._started = False
self._finalized = False
def __call__(self, filename, outdir, target):
if self._started:
self._worker.join()
self._started = False
self._filename = filename
self._worker = self.create_worker(filename, outdir, target,
**self._kwds)
self._worker.start()
self._started = True
def create_worker(self, filename, outdir, target, **kwds):
"""Creates a worker for the snapshot.
This method creates a thread or a process to take a snapshot. The
created worker must have :meth:`start` and :meth:`join` methods.
Args:
filename (str): Name of the file into which the serialized target
                is saved. It is an already formatted string.
outdir (str): Output directory. Passed by `trainer.out`.
target (dict): Serialized object which will be saved.
kwds: Keyword arguments for the ``savefun``.
"""
raise NotImplementedError
def finalize(self):
if self._started:
if not self._finalized:
self._worker.join()
self._started = False
self._finalized = True
class ThreadWriter(StandardWriter):
"""Snapshot writer that uses a separate thread.
This class creates a new thread that invokes the actual saving function.
.. seealso::
- :meth:`chainer.training.extensions.snapshot`
"""
def __init__(self, savefun=npz.save_npz, **kwds):
super(ThreadWriter, self).__init__(savefun=savefun, **kwds)
def create_worker(self, filename, outdir, target, **kwds):
return threading.Thread(
target=self.save,
args=(filename, outdir, target, self._savefun),
kwargs=self._kwds)
class ProcessWriter(StandardWriter):
"""Snapshot writer that uses a separate process.
This class creates a new process that invokes the actual saving function.
.. note::
        Forking a new process from an MPI process might be dangerous. Consider
using :class:`ThreadWriter` instead of ``ProcessWriter`` if you are
using MPI.
.. seealso::
- :meth:`chainer.training.extensions.snapshot`
"""
def __init__(self, savefun=npz.save_npz, **kwds):
super(ProcessWriter, self).__init__(savefun=savefun, **kwds)
def create_worker(self, filename, outdir, target, **kwds):
return multiprocessing.Process(
target=self.save,
args=(filename, outdir, target, self._savefun),
kwargs=self._kwds)
class QueueWriter(Writer):
"""Base class of queue snapshot writers.
This class is a base class of snapshot writers that use a queue.
    A queue is created when this class is constructed, and every time
    ``__call__`` is invoked, a snapshot task is put into the queue.
Args:
savefun: Callable object which is passed to the :meth:`create_task`
if the task is ``None``. It takes three arguments: the output file
path, the serialized dictionary object, and the optional keyword
arguments.
        task: Callable object. Its ``__call__`` must have the same interface
            as ``Writer.__call__``. This object is directly put into the
            queue.
.. seealso::
- :meth:`chainer.training.extensions.snapshot`
"""
_started = False
_finalized = False
_queue = None
_consumer = None
def __init__(self, savefun=npz.save_npz, task=None):
super(QueueWriter, self).__init__()
if task is None:
self._task = self.create_task(savefun)
else:
self._task = task
self._queue = self.create_queue()
self._consumer = self.create_consumer(self._queue)
self._consumer.start()
self._started = True
self._finalized = False
def __call__(self, filename, outdir, target):
self._queue.put([self._task, filename, outdir, target])
def create_task(self, savefun):
return SimpleWriter(savefun=savefun)
def create_queue(self):
raise NotImplementedError
def create_consumer(self, q):
raise NotImplementedError
def consume(self, q):
while True:
task = q.get()
if task is None:
q.task_done()
return
else:
task[0](task[1], task[2], task[3])
q.task_done()
def finalize(self):
if self._started:
if not self._finalized:
self._queue.put(None)
self._queue.join()
self._consumer.join()
self._started = False
self._finalized = True
class ThreadQueueWriter(QueueWriter):
"""Snapshot writer that uses a thread queue.
This class creates a thread and a queue by :mod:`threading` and
:mod:`queue` modules
respectively. The thread will be a consumer of the queue, and the main
thread will be a producer of the queue.
.. seealso::
- :meth:`chainer.training.extensions.snapshot`
"""
def __init__(self, savefun=npz.save_npz, task=None):
super(ThreadQueueWriter, self).__init__(savefun=savefun, task=task)
def create_queue(self):
return queue.Queue()
def create_consumer(self, q):
return threading.Thread(target=self.consume, args=(q,))
class ProcessQueueWriter(QueueWriter):
"""Snapshot writer that uses process queue.
This class creates a process and a queue by :mod:`multiprocessing` module.
The process will be a consumer of this queue, and the main process will be
a producer of this queue.
.. note::
        Forking a new process from an MPI process might be dangerous. Consider
        using
:class:`ThreadQueueWriter` instead of ``ProcessQueueWriter`` if you are
using MPI.
.. seealso::
- :meth:`chainer.training.extensions.snapshot`
"""
def __init__(self, savefun=npz.save_npz, task=None):
super(ProcessQueueWriter, self).__init__(savefun=savefun, task=task)
def create_queue(self):
return multiprocessing.JoinableQueue()
def create_consumer(self, q):
return multiprocessing.Process(target=self.consume, args=(q,))
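# Editor's usage sketch with a hypothetical trainer object: snapshots
# become asynchronous by handing the save to a worker thread via a queue.
def _example_async_snapshot(trainer):
    from chainer.training import extensions
    writer = ThreadQueueWriter()
    trainer.extend(extensions.snapshot(writer=writer), trigger=(1, 'epoch'))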
| 10,153
| 29.492492
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/inverse_shift.py
|
from __future__ import division
import numpy
from chainer.training import extension
class InverseShift(extension.Extension):
"""Trainer extension to shift an optimizer attribute.
    The new value is computed according to the formula below:
    new_attr = init_attr * (1 + gamma * iter) ^ (- power), which is
    compatible with the ``inv`` learning rate policy in Caffe.
The typical use is to decrease the learning rate during the training.
This extension is also called before the training loop starts by default.
Args:
attr (str): Name of the attribute to shift.
gamma (float): Parameter used to compute the new value. Refer to the
            formula above. Note that gamma is assumed to be nonnegative.
power (float): Parameter used to compute the new value. Refer to the
            formula above.
init (float): Initial value of the attribute. If it is ``None``, the
extension extracts the attribute at the first call and uses it as
the initial value.
target (float): Target value of the attribute. If the attribute reaches
this value, the shift stops.
optimizer (~chainer.Optimizer): Target optimizer to adjust the
attribute. If it is ``None``, the main optimizer of the updater is
used.
"""
def __init__(self, attr, gamma, power,
init=None, target=None, optimizer=None):
self._attr = attr
if gamma < 0:
raise ValueError('InverseShift does not support negative gamma')
self._gamma = gamma
self._power = power
self._init = init
self._target = target
self._optimizer = optimizer
self._t = 0
self._last_value = None
def initialize(self, trainer):
optimizer = self._get_optimizer(trainer)
# ensure that _init is set
if self._init is None:
self._init = getattr(optimizer, self._attr)
if self._last_value is not None: # resuming from a snapshot
self._update_value(optimizer, self._last_value)
else:
self._update_value(optimizer, self._init)
def __call__(self, trainer):
self._t += 1
optimizer = self._get_optimizer(trainer)
value = self._init * (1 + self._gamma * self._t) ** (-self._power)
if self._target is not None:
if self._power < 0:
# almost same as value = min(value, self._target), but this
# line supports negative values, too
if value / self._target > 1:
value = self._target
else:
# ditto
if value / self._target < 1:
value = self._target
self._update_value(optimizer, value)
def serialize(self, serializer):
self._t = serializer('_t', self._t)
self._last_value = serializer('_last_value', self._last_value)
if isinstance(self._last_value, numpy.ndarray):
self._last_value = self._last_value.item()
def _get_optimizer(self, trainer):
return self._optimizer or trainer.updater.get_optimizer('main')
def _update_value(self, optimizer, value):
setattr(optimizer, self._attr, value)
self._last_value = value
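# Editor's usage sketch with a hypothetical trainer: with init=0.1,
# gamma=1e-4 and power=0.75, the learning rate after t calls is
# 0.1 * (1 + 1e-4 * t) ** -0.75, i.e. Caffe's "inv" policy.
def _example_inverse_shift(trainer):
    trainer.extend(InverseShift('lr', gamma=1e-4, power=0.75, init=0.1))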
| 3,302
| 36.11236
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/multistep_shift.py
|
from __future__ import division
from chainer.training import extension
class MultistepShift(extension.Extension):
"""Trainer extension to shift an optimizer attribute in several steps.
    This extension changes an optimizer attribute in several steps; at each
    step the attribute is multiplied by the factor ``gamma``.
For example, suppose that this extension is called at every iteration,
and ``init = x``, ``gamma = y``, ``step_value = [s1, s2, s3]``.
Then during the iterations from 0 to (s1 - 1), the attr will be ``x``.
During the iterations from s1 to (s2 - 1), the attr will be ``x * y``.
During the iterations from s2 to (s3 - 1), the attr will be ``x * y * y``.
During the iterations after s3, the attr will be ``x * y * y * y``.
This extension is also called before the training loop starts by default.
Args:
attr (str): Name of the attribute to shift.
init (float): Initial value of the attribute. If it is ``None``, the
extension extracts the attribute at the first call and uses it as
the initial value.
        gamma (float): The factor by which the attribute is multiplied at the
            beginning of each step.
step_value (tuple): The first iterations of each step.
optimizer (~chainer.Optimizer): Target optimizer to adjust the
attribute. If it is ``None``, the main optimizer of the updater is
used.
"""
def __init__(self, attr, gamma, step_value, init, optimizer=None):
self._attr = attr
self._gamma = gamma
self._step_value = step_value
self._init = init
self._optimizer = optimizer
self._stepvalue_size = len(step_value)
self._current_step = 0
self._t = 0
def initialize(self, trainer):
optimizer = self._optimizer or trainer.updater.get_optimizer('main')
if self._init is None:
self._init = getattr(optimizer, self._attr)
else:
setattr(optimizer, self._attr, self._init)
def __call__(self, trainer):
self._t += 1
optimizer = self._optimizer or trainer.updater.get_optimizer('main')
if (self._current_step < self._stepvalue_size and
self._t >= self._step_value[self._current_step]):
self._current_step += 1
value = self._init * pow(self._gamma, self._current_step)
setattr(optimizer, self._attr, value)
def serialize(self, serializer):
self._t = serializer('_t', self._t)
self._current_step = serializer('_current_step', self._current_step)
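# Editor's usage sketch with a hypothetical trainer: called once per
# iteration, this keeps lr at 0.1 until iteration 1000, at 0.01 until
# iteration 2000, and at 0.001 afterwards (each step multiplies by 0.1).
def _example_multistep_shift(trainer):
    trainer.extend(
        MultistepShift('lr', gamma=0.1, step_value=(1000, 2000), init=0.1))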
| 2,599
| 39
| 78
|
py
|
chainer
|
chainer-master/chainer/training/extensions/progress_bar.py
|
from __future__ import division
import datetime
import sys
from chainer.training import extension
from chainer.training.extensions import util
class ProgressBar(extension.Extension):
"""Trainer extension to print a progress bar and recent training status.
This extension prints a progress bar at every call. It watches the current
iteration and epoch to print the bar.
Args:
training_length (tuple): Length of whole training. It consists of an
integer and either ``'epoch'`` or ``'iteration'``. If this value is
omitted and the stop trigger of the trainer is
:class:`IntervalTrigger`, this extension uses its attributes to
determine the length of the training.
update_interval (int): Number of iterations to skip printing the
progress bar.
bar_length (int): Length of the progress bar in characters.
out: Stream to print the bar. Standard output is used by default.
"""
def __init__(self, training_length=None, update_interval=100,
bar_length=50, out=sys.stdout):
self._training_length = training_length
self._status_template = None
self._update_interval = update_interval
self._bar_length = bar_length
self._out = out
self._recent_timing = []
self._pbar = _TrainerProgressBar()
def __call__(self, trainer):
self._pbar.trainer = trainer
iteration = trainer.updater.iteration
# print the progress bar
if iteration % self._update_interval == 0:
self._pbar.update()
def finalize(self):
self._pbar.close()
class _TrainerProgressBar(util.ProgressBar):
trainer = None
training_length = None
status_template = None
def get_lines(self):
lines = []
iteration = self.trainer.updater.iteration
epoch = self.trainer.updater.epoch_detail
if self.training_length is None:
t = self.trainer.stop_trigger
self.training_length = t.get_training_length()
length, unit = self.training_length
if unit == 'iteration':
rate = iteration / length
else:
rate = epoch / length
rate = min(rate, 1.0)
bar_length = self._bar_length
marks = '#' * int(rate * bar_length)
lines.append(' total [{}{}] {:6.2%}\n'.format(
marks, '.' * (bar_length - len(marks)), rate))
epoch_rate = epoch - int(epoch)
marks = '#' * int(epoch_rate * bar_length)
lines.append('this epoch [{}{}] {:6.2%}\n'.format(
marks, '.' * (bar_length - len(marks)), epoch_rate))
if self.status_template is None:
self.status_template = (
'{0.iteration:10} iter, {0.epoch} epoch / %s %ss\n' %
self.training_length)
status = self.status_template.format(self.trainer.updater)
lines.append(status)
speed_t, speed_e = self.update_speed(iteration, epoch)
if unit == 'iteration':
estimated_time = (length - iteration) / speed_t
else:
estimated_time = (length - epoch) / speed_e
estimated_time = max(estimated_time, 0.0)
lines.append('{:10.5g} iters/sec. Estimated time to finish: {}.\n'
.format(speed_t,
datetime.timedelta(seconds=estimated_time)))
return lines
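# Editor's usage sketch with a hypothetical trainer: redraw the bar
# every 10 iterations instead of the default 100.
def _example_progress_bar(trainer):
    trainer.extend(ProgressBar(update_interval=10))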
| 3,454
| 32.221154
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/exponential_shift.py
|
from __future__ import division
import numpy
from chainer.training import extension
class ExponentialShift(extension.Extension):
"""Trainer extension to exponentially shift an optimizer attribute.
This extension exponentially increases or decreases the specified attribute
of the optimizer. The typical use case is an exponential decay of the
learning rate.
This extension is also called before the training loop starts by default.
Args:
attr (str): Name of the attribute to shift.
rate (float): Rate of the exponential shift. This value is multiplied
to the attribute at each call.
init (float): Initial value of the attribute. If it is ``None``, the
extension extracts the attribute at the first call and uses it as
the initial value.
target (float): Target value of the attribute. If the attribute reaches
this value, the shift stops.
optimizer (~chainer.Optimizer): Target optimizer to adjust the
attribute. If it is ``None``, the main optimizer of the updater is
used.
"""
def __init__(self, attr, rate, init=None, target=None, optimizer=None):
self._attr = attr
if rate < 0:
raise ValueError('ExponentialShift does not support negative rate')
self._rate = rate
self._init = init
self._target = target
self._optimizer = optimizer
self._t = 0
self._last_value = None
def initialize(self, trainer):
optimizer = self._get_optimizer(trainer)
# ensure that _init is set
if self._init is None:
self._init = getattr(optimizer, self._attr)
if self._last_value is not None: # resuming from a snapshot
self._update_value(optimizer, self._last_value)
else:
self._update_value(optimizer, self._init)
def __call__(self, trainer):
self._t += 1
optimizer = self._get_optimizer(trainer)
value = self._init * (self._rate ** self._t)
if self._target is not None:
if self._rate > 1:
# almost same as value = min(value, self._target), but this
# line supports negative values, too
if value / self._target > 1:
value = self._target
else:
# ditto
if value / self._target < 1:
value = self._target
self._update_value(optimizer, value)
def serialize(self, serializer):
self._t = serializer('_t', self._t)
self._last_value = serializer('_last_value', self._last_value)
if isinstance(self._last_value, numpy.ndarray):
self._last_value = self._last_value.item()
def _get_optimizer(self, trainer):
return self._optimizer or trainer.updater.get_optimizer('main')
def _update_value(self, optimizer, value):
setattr(optimizer, self._attr, value)
self._last_value = value
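# Editor's usage sketch with a hypothetical trainer: halve the learning
# rate once per epoch, never letting it fall below the 1e-4 ``target``.
def _example_exponential_shift(trainer):
    trainer.extend(
        ExponentialShift('lr', rate=0.5, init=0.1, target=1e-4),
        trigger=(1, 'epoch'))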
| 3,023
| 35
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/linear_shift.py
|
from __future__ import division
import numpy
from chainer.training import extension
class LinearShift(extension.Extension):
"""Trainer extension to change an optimizer attribute linearly.
This extension changes an optimizer attribute from the first value to the
last value linearly within a specified duration. The typical use case is
warming up of the momentum coefficient.
For example, suppose that this extension is called at every iteration, and
``value_range == (x, y)`` and ``time_range == (i, j)``. Then, this
extension keeps the attribute to be ``x`` up to the ``i``-th iteration,
linearly shifts the value to ``y`` by the ``j``-th iteration, and then
keeps the value to be ``y`` after the ``j``-th iteration.
This extension is also called before the training loop starts by default.
Args:
attr (str): Name of the optimizer attribute to adjust.
value_range (tuple of float): The first and the last values of the
attribute.
time_range (tuple of ints): The first and last counts of calls in which
the attribute is adjusted.
optimizer (~chainer.Optimizer): Target optimizer object. If it is None,
the main optimizer of the trainer is used.
"""
def __init__(self, attr, value_range, time_range, optimizer=None):
self._attr = attr
self._value_range = value_range
self._time_range = time_range
self._optimizer = optimizer
self._t = 0
self._last_value = None
def initialize(self, trainer):
optimizer = self._get_optimizer(trainer)
if self._last_value is not None:
value = self._last_value
else:
value = self._compute_next_value()
self._update_value(optimizer, value)
def __call__(self, trainer):
self._t += 1
optimizer = self._get_optimizer(trainer)
value = self._compute_next_value()
self._update_value(optimizer, value)
def serialize(self, serializer):
self._t = serializer('_t', self._t)
self._last_value = serializer('_last_value', self._last_value)
if isinstance(self._last_value, numpy.ndarray):
self._last_value = self._last_value.item()
def _get_optimizer(self, trainer):
return self._optimizer or trainer.updater.get_optimizer('main')
def _compute_next_value(self):
t1, t2 = self._time_range
v1, v2 = self._value_range
if self._t <= t1:
return v1
elif self._t >= t2:
return v2
rate = (self._t - t1) / (t2 - t1)
return v1 + rate * (v2 - v1)
def _update_value(self, optimizer, value):
setattr(optimizer, self._attr, value)
self._last_value = value
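# Editor's usage sketch with a hypothetical trainer: warm the momentum
# up from 0.5 to 0.9 linearly between the 100th and 500th call, then
# hold it at 0.9.
def _example_linear_shift(trainer):
    trainer.extend(
        LinearShift('momentum', value_range=(0.5, 0.9),
                    time_range=(100, 500)))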
| 2,786
| 33.8375
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/_snapshot.py
|
import os
import warnings
import chainer
from chainer.serializers import npz
from chainer.training import extension
from chainer.training.extensions import snapshot_writers
from chainer.utils import argument
def _find_snapshot_files(fmt, path):
'''Only prefix and suffix match
    TODO(kuenishi): currently only clean format strings such as
    "snapshot{.iteration}.npz" can be parsed, while tricky (or
    invalid) formats like "snapshot{{.iteration}}.npz" are hard to
    detect and to report properly; so far they are just ignored or fail.
Args:
        fmt (str): format string to match with file names of
            existing snapshots, where only the prefix and suffix
            are examined. Also, files' staleness is judged by
            timestamps, ``mtime`` by default.
path (str): a directory path to search for snapshot files.
Returns:
        A sorted list of ``(mtime, filename)`` pairs for files whose
        names match the format ``fmt`` directly under ``path``.
'''
prefix = fmt.split('{')[0]
suffix = fmt.split('}')[-1]
matched_files = (file for file in os.listdir(path)
if file.startswith(prefix) and file.endswith(suffix))
def _prepend_mtime(f):
t = os.stat(os.path.join(path, f)).st_mtime
return (t, f)
return sorted(_prepend_mtime(file) for file in matched_files)
def _find_latest_snapshot(fmt, path):
"""Finds the latest snapshots in a directory
Args:
        fmt (str): format string to match with file names of
            existing snapshots, where only the prefix and suffix
            are examined. Also, files' staleness is judged by
            timestamps, ``mtime`` by default.
path (str): a directory path to search for snapshot files.
Returns:
        The latest snapshot file, i.e. the one with the newest
        ``mtime`` among the files matching format ``fmt`` directly
        under ``path``. If no such file is found, it returns ``None``.
"""
snapshot_files = _find_snapshot_files(fmt, path)
if len(snapshot_files) > 0:
_, filename = snapshot_files[-1]
return filename
return None
def _find_stale_snapshots(fmt, path, n_retains, **kwargs):
"""Finds stale snapshots in a directory, retaining several files
Args:
        fmt (str): format string to match with file names of
            existing snapshots, where only the prefix and suffix
            are examined. Also, files' staleness is judged by
            timestamps, ``mtime`` by default.
path (str): a directory path to search for snapshot files.
n_retains (int): Number of snapshot files to retain
through the cleanup. Must be a positive integer for any cleanup to
take place.
num_retain (int): Same as ``n_retains`` (deprecated).
Yields:
str: The next stale file that matches format
``fmt`` directly under ``path`` and with older ``mtime``,
excluding newest ``n_retains`` files.
"""
if 'num_retain' in kwargs:
warnings.warn(
'Argument `num_retain` is deprecated. '
'Please use `n_retains` instead',
DeprecationWarning)
n_retains = kwargs['num_retain']
snapshot_files = _find_snapshot_files(fmt, path)
n_removes = len(snapshot_files) - n_retains
if n_removes > 0:
for _, filename in snapshot_files[:n_removes]:
yield filename
def snapshot_object(target, filename, savefun=None, **kwargs):
"""snapshot_object(target, filename, savefun=None, \
*, condition=None, writer=None, snapshot_on_error=False, \
n_retains=-1, autoload=False)
Returns a trainer extension to take snapshots of a given object.
This extension serializes the given object and saves it to the output
directory.
This extension is called once per epoch by default. To take a
snapshot at a different interval, a trigger object specifying the
required interval can be passed along with this extension
to the `extend()` method of the trainer.
The default priority is -100, which is lower than that of most
built-in extensions.
Args:
target: Object to serialize.
filename (str): Name of the file into which the object is serialized.
It can be a format string, where the trainer object is passed to
the :meth:`str.format` method. For example,
``'snapshot_{.updater.iteration}'`` is converted to
``'snapshot_10000'`` at the 10,000th iteration.
savefun: Function to save the object. It takes two arguments: the
output file path and the object to serialize.
condition: Condition object. It must be a callable object that returns
boolean without any arguments. If it returns ``True``, the snapshot
will be done.
If not, it will be skipped. The default is a function that always
returns ``True``.
writer: Writer object.
It must be a callable object.
See below for the list of built-in writers.
If ``savefun`` is other than ``None``, this argument must be
``None``. In that case, a
:class:`~chainer.training.extensions.snapshot_writers.SimpleWriter`
object instantiated with specified ``savefun`` argument will be
used.
        snapshot_on_error (bool): Whether to take a snapshot in case the
            trainer loop fails.
n_retains (int): Number of snapshot files to retain
through the cleanup. Must be a positive integer for any cleanup to
take place. Automatic deletion of old snapshots only works when the
            filename is a string.
num_retain (int): Same as ``n_retains`` (deprecated).
autoload (bool): With this enabled, the extension automatically
finds the latest snapshot and loads the data to the target.
Automatic loading only works when the filename is a string.
Returns:
Snapshot extension object.
.. seealso::
- :meth:`chainer.training.extensions.snapshot`
"""
if 'num_retain' in kwargs:
warnings.warn(
'Argument `num_retain` is deprecated. '
'Please use `n_retains` instead',
DeprecationWarning)
kwargs['n_retains'] = kwargs.pop('num_retain')
return snapshot(target=target, filename=filename, savefun=savefun,
**kwargs)
def snapshot(savefun=None,
filename='snapshot_iter_{.updater.iteration}', **kwargs):
"""snapshot(savefun=None, filename='snapshot_iter_{.updater.iteration}', \
*, target=None, condition=None, writer=None, snapshot_on_error=False, \
n_retains=-1, autoload=False)
Returns a trainer extension to take snapshots of the trainer.
This extension serializes the trainer object and saves it to the output
directory. It is used to support resuming the training loop from the saved
state.
This extension is called once per epoch by default. To take a
snapshot at a different interval, a trigger object specifying the
required interval can be passed along with this extension
to the `extend()` method of the trainer.
The default priority is -100, which is lower than that of most
built-in extensions.
.. note::
        This extension first writes the serialized object to a temporary file
        and then renames it to the target file name. Thus, if the program
        stops
right before the renaming, the temporary file might be left in the
output directory.
Args:
savefun: Function to save the trainer. It takes two arguments: the
output file path and the trainer object.
It is :meth:`chainer.serializers.save_npz` by default.
If ``writer`` is specified, this argument must be ``None``.
filename (str): Name of the file into which the trainer is serialized.
It can be a format string, where the trainer object is passed to
the :meth:`str.format` method.
target: Object to serialize. If it is not specified, it will
be the trainer object.
condition: Condition object. It must be a callable object that returns
boolean without any arguments. If it returns ``True``, the snapshot
will be done.
If not, it will be skipped. The default is a function that always
returns ``True``.
writer: Writer object.
It must be a callable object.
See below for the list of built-in writers.
If ``savefun`` is other than ``None``, this argument must be
``None``. In that case, a
:class:`~chainer.training.extensions.snapshot_writers.SimpleWriter`
object instantiated with specified ``savefun`` argument will be
used.
        snapshot_on_error (bool): Whether to take a snapshot in case the
            trainer loop fails.
n_retains (int): Number of snapshot files to retain
through the cleanup. Must be a positive integer for any cleanup to
take place. Automatic deletion of old snapshots only works when the
            filename is a string.
num_retain (int): Same as ``n_retains`` (deprecated).
autoload (bool): With this enabled, the extension
automatically finds the latest snapshot and loads the data
to the target. Automatic loading only works when the
filename is a string. It is assumed that snapshots are generated
            by :func:`chainer.serializers.save_npz`.
Returns:
Snapshot extension object.
.. testcode::
:hide:
from chainer import training
class Model(chainer.Link):
def __call__(self, x):
return x
train_iter = chainer.iterators.SerialIterator([], 1)
optimizer = optimizers.SGD().setup(Model())
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=0)
trainer = training.Trainer(updater)
.. admonition:: Using asynchronous writers
By specifying ``writer`` argument, writing operations can be made
asynchronous, hiding I/O overhead of snapshots.
>>> from chainer.training import extensions
>>> writer = extensions.snapshot_writers.ProcessWriter()
>>> trainer.extend(extensions.snapshot(writer=writer), \
trigger=(1, 'epoch'))
To change the format, such as npz or hdf5, you can pass a saving
function as ``savefun`` argument of the writer.
>>> from chainer.training import extensions
>>> from chainer import serializers
>>> writer = extensions.snapshot_writers.ProcessWriter(
... savefun=serializers.save_npz)
>>> trainer.extend(extensions.snapshot(writer=writer), \
trigger=(1, 'epoch'))
This is the list of built-in snapshot writers.
- :class:`chainer.training.extensions.snapshot_writers.SimpleWriter`
- :class:`chainer.training.extensions.snapshot_writers.ThreadWriter`
- :class:`chainer.training.extensions.snapshot_writers.ProcessWriter`
- :class:`chainer.training.extensions.snapshot_writers.\
ThreadQueueWriter`
- :class:`chainer.training.extensions.snapshot_writers.\
ProcessQueueWriter`
.. seealso::
- :meth:`chainer.training.extensions.snapshot_object`
"""
if 'num_retain' in kwargs:
warnings.warn(
'Argument `num_retain` is deprecated. '
'Please use `n_retains` instead',
DeprecationWarning)
kwargs['n_retains'] = kwargs.pop('num_retain')
target, condition, writer, snapshot_on_error, n_retains,\
autoload = argument.parse_kwargs(
kwargs,
('target', None), ('condition', None), ('writer', None),
('snapshot_on_error', False), ('n_retains', -1),
('autoload', False))
argument.assert_kwargs_empty(kwargs)
if savefun is not None and writer is not None:
raise TypeError(
'savefun and writer arguments cannot be specified together.')
if writer is None:
if savefun is None:
savefun = npz.save_npz
writer = snapshot_writers.SimpleWriter(savefun=savefun)
return _Snapshot(
target=target, condition=condition, writer=writer, filename=filename,
snapshot_on_error=snapshot_on_error, n_retains=n_retains,
autoload=autoload)
def _always_true():
return True
class _Snapshot(extension.Extension):
"""Trainer extension to take snapshots.
This extension serializes the given object and saves it to the output
directory.
This extension is called once per epoch by default. To take a
snapshot at a different interval, a trigger object specifying the
required interval can be passed along with this extension
to the `extend()` method of the trainer.
The default priority is -100, which is lower than that of most
built-in extensions.
"""
trigger = 1, 'epoch'
priority = -100
def __init__(
self, target=None, condition=None, writer=None,
filename='snapshot_iter_{.updater.iteration}',
snapshot_on_error=False, n_retains=-1, autoload=False, **kwargs):
if condition is None:
condition = _always_true
if writer is None:
writer = snapshot_writers.SimpleWriter()
if 'num_retain' in kwargs:
warnings.warn(
'Argument `num_retain` is deprecated. '
'Please use `n_retains` instead',
DeprecationWarning)
n_retains = kwargs['num_retain']
self._target = target
self.filename = filename
self.condition = condition
self.writer = writer
self._snapshot_on_error = snapshot_on_error
self.n_retains = n_retains
self.autoload = autoload
def initialize(self, trainer):
target = trainer if self._target is None else self._target
outdir = trainer.out
if self.autoload:
# If ``autoload`` is on, this code scans the ``outdir``
# for potential snapshot files by matching the file names
# from ``filename`` format, picks up the latest one in
            # terms of mtime, and tries to load it to the target or
# trainer.
filename = _find_latest_snapshot(self.filename, outdir)
if filename is None:
if chainer.is_debug():
print('No snapshot file that matches {} was found'
.format(self.filename))
else:
snapshot_file = os.path.join(outdir, filename)
# As described above (at ``autoload`` option),
# snapshot files to be autoloaded must be saved by
                # ``save_npz``. In order to support general formats,
                # we need to first reconstruct the design of savefun
                # and loadfun.
npz.load_npz(snapshot_file, target)
if chainer.is_debug():
print('Snapshot loaded from', snapshot_file)
if (hasattr(self.writer, '_add_cleanup_hook')
and self.n_retains > 0
and isinstance(self.filename, str)):
# This block sets a method to automatic cleanup of stale
# snapshots, when ``n_retains`` argument is positive
# number. When the given snapshot writer is Chainer's
# built-in writer, a cleanup method that is to be
# triggered right after creation of new snapshot file, is
# injected here.
def _cleanup():
files = _find_stale_snapshots(self.filename, outdir,
self.n_retains)
for file in files:
os.remove(os.path.join(outdir, file))
self.writer._add_cleanup_hook(_cleanup)
def on_error(self, trainer, exc, tb):
super(_Snapshot, self).on_error(trainer, exc, tb)
if self._snapshot_on_error:
self._make_snapshot(trainer)
def __call__(self, trainer):
if self.condition():
self._make_snapshot(trainer)
def _make_snapshot(self, trainer):
target = trainer if self._target is None else self._target
serialized_target = npz.serialize(target)
filename = self.filename
if callable(filename):
filename = filename(trainer)
else:
filename = filename.format(trainer)
outdir = trainer.out
self.writer(filename, outdir, serialized_target)
def finalize(self):
if hasattr(self.writer, 'finalize'):
self.writer.finalize()
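# Editor's usage sketch with a hypothetical trainer: snapshot once per
# epoch, keep only the three newest files, and resume automatically from
# the latest snapshot found in ``trainer.out``.
def _example_snapshot(trainer):
    trainer.extend(
        snapshot(n_retains=3, autoload=True),
        trigger=(1, 'epoch'))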
| 16,835
| 38.801418
| 79
|
py
|