repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
chainer
|
chainer-master/chainer/training/extensions/warmup_shift.py
|
from __future__ import division
from chainer.training import extension
class WarmupShift(extension.Extension):
"""Trainer extension to gradually initialize an optimizer attribute.
This extension changes an optimizer attribute evenly at the
beginning of one training.
For example, suppose that this extension is called at every iteration,
and warmup_start = x , init = y, warmup_iter = t.
Then this extension will set the corresponding attribute to from
``x`` to ``y`` evenly in first ``t`` iterations.
This extension is also called before the training loop starts by default.
Args:
attr (str): Name of the optimizer attribute to adjust.
warmup_start (float): the value of the attr at the beginning
of one training.
init (float): the value of the attr after warm up iterations.
warmup_iter (int): the number of the iterations in which the
attr changes from ``warmup_start`` to ``init``.
optimizer (~chainer.Optimizer): Target optimizer object.
If it is None, the main optimizer of the trainer is used.
"""
def __init__(self, attr, warmup_start, warmup_iter, init,
optimizer=None):
self._attr = attr
self._warmup_start = warmup_start
self._warmup_iter = warmup_iter
self._init = init
self._optimizer = optimizer
self._t = 0
def initialize(self, trainer):
optimizer = self._optimizer or trainer.updater.get_optimizer('main')
if self._warmup_start is None:
self._warmup_start = getattr(optimizer, self._attr)
else:
setattr(optimizer, self._attr, self._warmup_start)
def __call__(self, trainer):
self._t += 1
if self._t <= self._warmup_iter:
optimizer = self._optimizer or \
trainer.updater.get_optimizer('main')
value = (self._t * self._init + (self._warmup_iter - self._t)
* self._warmup_start) / self._warmup_iter
setattr(optimizer, self._attr, value)
def serialize(self, serializer):
self._t = serializer('_t', self._t)
| 2,174
| 35.864407
| 77
|
py
|
chainer
|
chainer-master/chainer/training/extensions/print_report.py
|
import os
import sys
from chainer.training import extension
from chainer.training.extensions import log_report as log_report_module
from chainer.training.extensions import util
class PrintReport(extension.Extension):
"""Trainer extension to print the accumulated results.
This extension uses the log accumulated by a :class:`LogReport` extension
to print specified entries of the log in a human-readable format.
Args:
entries (list of str): List of keys of observations to print.
log_report (str or LogReport): Log report to accumulate the
observations. This is either the name of a LogReport extensions
registered to the trainer, or a LogReport instance to use
internally.
out: Stream to print the bar. Standard output is used by default.
"""
def __init__(self, entries, log_report='LogReport', out=sys.stdout):
self._entries = entries
self._log_report = log_report
self._out = out
self._log_len = 0 # number of observations already printed
# format information
entry_widths = [max(10, len(s)) for s in entries]
header = ' '.join(('{:%d}' % w for w in entry_widths)).format(
*entries) + '\n'
self._header = header # printed at the first call
templates = []
for entry, w in zip(entries, entry_widths):
templates.append((entry, '{:<%dg} ' % w, ' ' * (w + 2)))
self._templates = templates
def __call__(self, trainer):
out = self._out
if self._header:
out.write(self._header)
self._header = None
log_report = self._log_report
if isinstance(log_report, str):
log_report = trainer.get_extension(log_report)
elif isinstance(log_report, log_report_module.LogReport):
log_report(trainer) # update the log report
else:
raise TypeError('log report has a wrong type %s' %
type(log_report))
log = log_report.log
log_len = self._log_len
while len(log) > log_len:
# delete the printed contents from the current cursor
if os.name == 'nt':
util.erase_console(0, 0)
else:
out.write('\033[J')
self._print(log[log_len])
log_len += 1
self._log_len = log_len
def serialize(self, serializer):
log_report = self._log_report
if isinstance(log_report, log_report_module.LogReport):
log_report.serialize(serializer['_log_report'])
def _print(self, observation):
out = self._out
for entry, template, empty in self._templates:
if entry in observation:
out.write(template.format(observation[entry]))
else:
out.write(empty)
out.write('\n')
if hasattr(out, 'flush'):
out.flush()
| 2,967
| 32.727273
| 77
|
py
|
chainer
|
chainer-master/chainer/training/extensions/fail_on_nonnumber.py
|
from chainer.training import extension
class FailOnNonNumber(extension.Extension):
"""Trainer extension to raise RuntimeError if parameters contain NaN or Inf.
Although parameters including non-number such as NaN and Inf are
unnecessary in most cases, :class:`~chainer.training.Trainer` will continue
to compute even if the parameters in a given optimizer diverge.
This extension is aimed to reduce unnecessary computations by throwing
``RuntimeError`` if the parameters contain NaN or Inf.
"""
def __call__(self, trainer):
optimizers = trainer.updater.get_all_optimizers()
for name, optimizer in optimizers.items():
target = optimizer.target
xp = target.xp
for param in target.params():
if not xp.isfinite(param.array).all():
raise RuntimeError(
'Kill the process since parameters in optimizer'
' \'{}\' diverge. R.I.P.'.format(name))
| 1,008
| 41.041667
| 80
|
py
|
chainer
|
chainer-master/chainer/training/extensions/computational_graph.py
|
import os
import subprocess
from chainer import computational_graph
from chainer import configuration
from chainer.training import extension
from chainer.utils import argument
from chainer import variable
def is_return_code_zero(args):
"""Return `True` if the return code of the given command
is zero.
All the messages sent to stdout or stderr are suppressed.
Args:
args (list of str): A command to execute.
"""
with open(os.devnull, 'wb') as FNULL:
try:
subprocess.check_call(args, stdout=FNULL, stderr=FNULL)
except subprocess.CalledProcessError:
# The given command returned an error
return False
except OSError:
# The given command was not found
return False
return True
def is_graphviz_available():
"""Tell whether graphviz is available or not."""
return is_return_code_zero(['dot', '-V'])
_var_style = {'shape': 'octagon', 'fillcolor': '#E0E0E0', 'style': 'filled'}
_func_style = {'shape': 'record', 'fillcolor': '#6495ED', 'style': 'filled'}
class DumpGraph(extension.Extension):
"""__init__(\
root_name, filename='cg.dot', variable_style=None, function_style=None)
Trainer extension to dump a computational graph.
This extension dumps a computational graph. The graph is output in DOT
language. If graphviz is available, this also renders and saves the image
of the computational graph.
It only dumps a graph at the first invocation.
.. note::
The computational graph is not kept by default. This
extension changes this behavior until the first invocation. **It is
strongly recommended that you use it with the default trigger setting.**
The detailed behavior of this extension is as follows.
1. In its initializer, it turns on the
``chainer.config.keep_graph_on_report`` flag.
2. At the first iteration, it dumps the graph using the graph held by
the reported variable.
3. After dumping the graph, it turns off the flag (if it was originally
turned off) so that any variable reported afterward does not hold
a computational graph.
When the ``keep_graph_on_report`` flag is turned on, the computational
graph created by the updater is kept during the invocation of
extensions. It will cause an unnecessarily large memory consumption
when an extension also uses a large amount of memory, e.g.
:class:`~chainer.training.extensions.Evaluator`.
With the default setting, the ``DumpGraph`` extension is called at the
first iteration. Since :class:`~chainer.training.extensions.Evaluator`
is not called at the first iteration in most cases, it does not cause
any memory problem.
Args:
root_name (str): Name of the root of the computational graph. The
root variable is retrieved by this name from the observation
dictionary of the trainer.
filename (str): Output file name.
For historical reasons ``out_name`` is also accepted as an alias
of this argument.
variable_style (dict): Dot node style for variables. Each variable is
rendered by an octagon by default.
function_style (dict): Dot node style for functions. Each function is
rendered by a rectangular by default.
.. seealso::
See :func:`~chainer.computational_graph.build_computational_graph`
for the ``variable_style`` and ``function_style`` arguments.
"""
default_name = 'dump_graph'
def __init__(self, root_name, filename=None,
variable_style=None, function_style=None, **kwargs):
out_name, = argument.parse_kwargs(kwargs, ('out_name', 'cg.dot'))
if filename is None:
filename = out_name
del out_name # avoid accidental use
self._root_name = root_name
self._filename = filename
if variable_style is None:
variable_style = _var_style
self._variable_style = variable_style
if function_style is None:
function_style = _func_style
self._function_style = function_style
self._original_flag = None
self._flag_called = False
def initialize(self, trainer):
if not self._flag_called:
self._original_flag = configuration.config.keep_graph_on_report
configuration.config.keep_graph_on_report = True
def trigger(self, trainer):
if self._flag_called:
return False
self._flag_called = True
return True
def __call__(self, trainer):
try:
var = trainer.observation[self._root_name]
if not isinstance(var, variable.Variable):
raise TypeError('root value is not a Variable')
cg = computational_graph.build_computational_graph(
[var],
variable_style=self._variable_style,
function_style=self._function_style
).dump()
filename = os.path.join(trainer.out, self._filename)
with open(filename, 'w') as f:
f.write(cg)
if is_graphviz_available():
img_fn = os.path.splitext(self._filename)[0] + '.png'
image_filename = os.path.join(trainer.out, img_fn)
subprocess.check_call(
['dot', '-Tpng', filename, '-o', image_filename])
finally:
configuration.config.keep_graph_on_report = self._original_flag
def serialize(self, serializer):
self._original_flag = serializer('_original_flag', self._original_flag)
self._flag_called = serializer('_flag_called', self._flag_called)
| 5,787
| 36.584416
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/variable_statistics_plot.py
|
from __future__ import division
import os
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.training import extension
from chainer.training import trigger as trigger_module
from chainer.utils import argument
_available = None
def _try_import_matplotlib():
global matplotlib, _available
global _plot_color, _plot_color_trans, _plot_common_kwargs
try:
import matplotlib
_available = True
except ImportError:
_available = False
if _available:
if hasattr(matplotlib.colors, 'to_rgba'):
_to_rgba = matplotlib.colors.to_rgba
else:
# For matplotlib 1.x
_to_rgba = matplotlib.colors.ColorConverter().to_rgba
_plot_color = _to_rgba('#1f77b4') # C0 color
_plot_color_trans = _plot_color[:3] + (0.2,) # apply alpha
_plot_common_kwargs = {
'alpha': 0.2, 'linewidth': 0, 'color': _plot_color_trans}
def _check_available():
if _available is None:
_try_import_matplotlib()
if not _available:
warnings.warn('matplotlib is not installed on your environment, '
'so nothing will be plotted at this time. '
'Please install matplotlib to plot figures.\n\n'
' $ pip install matplotlib\n')
def _unpack_variables(x, memo=None):
if memo is None:
memo = ()
if isinstance(x, chainer.Variable):
memo += (x,)
elif isinstance(x, chainer.Link):
memo += tuple(x.params(include_uninit=True))
elif isinstance(x, (list, tuple)):
for xi in x:
memo += _unpack_variables(xi)
return memo
class Reservoir(object):
"""Reservoir sample with a fixed sized buffer."""
def __init__(self, size, data_shape, dtype=numpy.float32):
self.size = size
self.data = numpy.zeros((size,) + data_shape, dtype=dtype)
self.idxs = numpy.zeros((size,), dtype=numpy.int32)
self.counter = 0
def add(self, x, idx=None):
if self.counter < self.size:
self.data[self.counter] = x
self.idxs[self.counter] = idx or self.counter
elif self.counter >= self.size and \
numpy.random.random() < self.size / float(self.counter + 1):
i = numpy.random.randint(self.size)
self.data[i] = x
self.idxs[i] = idx or self.counter
self.counter += 1
def get_data(self):
idxs = self.idxs[:min(self.counter, self.size)]
sorted_args = numpy.argsort(idxs)
return idxs[sorted_args], self.data[sorted_args]
class Statistician(object):
"""Helper to compute basic NumPy-like statistics."""
def __init__(self, collect_mean, collect_std, percentile_sigmas):
self.collect_mean = collect_mean
self.collect_std = collect_std
self.percentile_sigmas = percentile_sigmas
def __call__(self, x, axis=0, dtype=None, xp=None):
if axis is None:
axis = tuple(range(x.ndim))
elif not isinstance(axis, (tuple, list)):
axis = axis,
return self.collect(x, axis)
def collect(self, x, axis):
out = dict()
if self.collect_mean:
out['mean'] = x.mean(axis=axis)
if self.collect_std:
out['std'] = x.std(axis=axis)
if self.percentile_sigmas:
xp = backend.get_array_module(x)
p = xp.percentile(x, self.percentile_sigmas, axis=axis)
out['percentile'] = p
return out
class VariableStatisticsPlot(extension.Extension):
"""__init__(\
targets, max_sample_size=1000, report_data=True, report_grad=True, \
plot_mean=True, plot_std=True, \
percentile_sigmas=(0, 0.13, 2.28, 15.87, 50, 84.13, 97.72, 99.87, 100), \
trigger=(1, 'epoch'), filename='statistics.png', figsize=None, marker=None, \
grid=True)
Trainer extension to plot statistics for :class:`~chainer.Variable`\\s.
This extension collects statistics for a single :class:`Variable`, a list
of :class:`Variable`\\s or similarly a single or a list of
:class:`Link`\\s containing one or more :class:`Variable`\\s. In case
multiple :class:`Variable`\\s are found, the means are computed. The
collected statistics are plotted and saved as an image in the directory
specified by the :class:`Trainer`.
Statistics include mean, standard deviation and percentiles.
This extension uses reservoir sampling to preserve memory, using a fixed
size running sample. This means that collected items in the sample are
discarded uniformly at random when the number of items becomes larger
than the maximum sample size, but each item is expected to occur in the
sample with equal probability.
Args:
targets (:class:`Variable`, :class:`Link` or list of either):
Parameters for which statistics are collected.
max_sample_size (int):
Maximum number of running samples.
report_data (bool):
If ``True``, data (e.g. weights) statistics are plotted. If
``False``, they are neither computed nor plotted.
report_grad (bool):
If ``True``, gradient statistics are plotted. If ``False``, they
are neither computed nor plotted.
plot_mean (bool):
If ``True``, means are plotted. If ``False``, they are
neither computed nor plotted.
plot_std (bool):
If ``True``, standard deviations are plotted. If ``False``, they
are neither computed nor plotted.
percentile_sigmas (float or tuple of floats):
Percentiles to plot in the range :math:`[0, 100]`.
trigger:
Trigger that decides when to save the plots as an image. This is
distinct from the trigger of this extension itself. If it is a
tuple in the form ``<int>, 'epoch'`` or ``<int>, 'iteration'``, it
is passed to :class:`IntervalTrigger`.
filename (str):
Name of the output image file under the output directory.
For historical reasons ``file_name`` is also accepted as an alias
of this argument.
figsize (tuple of int):
Matlotlib ``figsize`` argument that specifies the size of the
output image.
marker (str):
Matplotlib ``marker`` argument that specified the marker style of
the plots.
grid (bool):
Matplotlib ``grid`` argument that specifies whether grids are
rendered in in the plots or not.
"""
def __init__(self, targets, max_sample_size=1000,
report_data=True, report_grad=True,
plot_mean=True, plot_std=True,
percentile_sigmas=(
0, 0.13, 2.28, 15.87, 50, 84.13, 97.72, 99.87, 100),
trigger=(1, 'epoch'), filename=None,
figsize=None, marker=None, grid=True, **kwargs):
file_name, = argument.parse_kwargs(
kwargs, ('file_name', 'statistics.png')
)
if filename is None:
filename = file_name
del file_name # avoid accidental use
self._vars = _unpack_variables(targets)
if not self._vars:
raise ValueError(
'Need at least one variables for which to collect statistics.'
'\nActual: 0 <= 0')
if not any((plot_mean, plot_std, bool(percentile_sigmas))):
raise ValueError('Nothing to plot')
self._keys = []
if report_data:
self._keys.append('data')
if report_grad:
self._keys.append('grad')
self._report_data = report_data
self._report_grad = report_grad
self._statistician = Statistician(
collect_mean=plot_mean, collect_std=plot_std,
percentile_sigmas=percentile_sigmas)
self._plot_mean = plot_mean
self._plot_std = plot_std
self._plot_percentile = bool(percentile_sigmas)
self._trigger = trigger_module.get_trigger(trigger)
self._filename = filename
self._figsize = figsize
self._marker = marker
self._grid = grid
if not self._plot_percentile:
n_percentile = 0
else:
if not isinstance(percentile_sigmas, (list, tuple)):
n_percentile = 1 # scalar, single percentile
else:
n_percentile = len(percentile_sigmas)
self._data_shape = (
len(self._keys), int(plot_mean) + int(plot_std) + n_percentile)
self._samples = Reservoir(max_sample_size, data_shape=self._data_shape)
@staticmethod
def available():
_check_available()
return _available
def __call__(self, trainer):
if self.available():
# Dynamically import pyplot to call matplotlib.use()
# after importing chainer.training.extensions
import matplotlib.pyplot as plt
else:
return
xp = backend.get_array_module(self._vars[0].data)
stats = xp.zeros(self._data_shape, dtype=xp.float32)
for i, k in enumerate(self._keys):
xs = []
for var in self._vars:
x = getattr(var, k, None)
if x is not None:
xs.append(x.ravel())
if xs:
stat_dict = self._statistician(
xp.concatenate(xs, axis=0), axis=0, xp=xp)
stat_list = []
if self._plot_mean:
stat_list.append(xp.atleast_1d(stat_dict['mean']))
if self._plot_std:
stat_list.append(xp.atleast_1d(stat_dict['std']))
if self._plot_percentile:
stat_list.append(xp.atleast_1d(stat_dict['percentile']))
stats[i] = xp.concatenate(stat_list, axis=0)
if xp == cuda.cupy:
stats = cuda.to_cpu(stats)
self._samples.add(stats, idx=trainer.updater.iteration)
if self._trigger(trainer):
file_path = os.path.join(trainer.out, self._filename)
self.save_plot_using_module(file_path, plt)
def save_plot_using_module(self, file_path, plt):
nrows = int(self._plot_mean or self._plot_std) \
+ int(self._plot_percentile)
ncols = len(self._keys)
fig, axes = plt.subplots(
nrows, ncols, figsize=self._figsize, sharex=True)
if not isinstance(axes, numpy.ndarray): # single subplot
axes = numpy.asarray([axes])
if nrows == 1:
axes = axes[None, :]
elif ncols == 1:
axes = axes[:, None]
assert axes.ndim == 2
idxs, data = self._samples.get_data()
# Offset to access percentile data from `data`
offset = int(self._plot_mean) + int(self._plot_std)
n_percentile = data.shape[-1] - offset
n_percentile_mid_floor = n_percentile // 2
n_percentile_odd = n_percentile % 2 == 1
for col in six.moves.range(ncols):
row = 0
ax = axes[row, col]
ax.set_title(self._keys[col]) # `data` or `grad`
if self._plot_mean or self._plot_std:
if self._plot_mean and self._plot_std:
ax.errorbar(
idxs, data[:, col, 0], data[:, col, 1],
color=_plot_color, ecolor=_plot_color_trans,
label='mean, std', marker=self._marker)
else:
if self._plot_mean:
label = 'mean'
elif self._plot_std:
label = 'std'
ax.plot(
idxs, data[:, col, 0], color=_plot_color, label=label,
marker=self._marker)
row += 1
if self._plot_percentile:
ax = axes[row, col]
for i in six.moves.range(n_percentile_mid_floor + 1):
if n_percentile_odd and i == n_percentile_mid_floor:
# Enters at most once per sub-plot, in case there is
# only a single percentile to plot or when this
# percentile is the mid percentile and the number of
# percentiles are odd
ax.plot(
idxs, data[:, col, offset + i], color=_plot_color,
label='percentile', marker=self._marker)
else:
if i == n_percentile_mid_floor:
# Last percentiles and the number of all
# percentiles are even
label = 'percentile'
else:
label = '_nolegend_'
ax.fill_between(
idxs,
data[:, col, offset + i],
data[:, col, -i - 1],
label=label,
**_plot_common_kwargs)
ax.set_xlabel('iteration')
for ax in axes.ravel():
ax.legend()
if self._grid:
ax.grid()
ax.set_axisbelow(True)
fig.savefig(file_path)
plt.close()
| 13,511
| 35.617886
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/evaluator.py
|
import copy
import datetime
import warnings
import six
from chainer import backend
from chainer import configuration
from chainer.dataset import convert
from chainer.dataset import iterator as iterator_module
from chainer import function
from chainer import iterators
from chainer import link
from chainer import reporter as reporter_module
from chainer.training import extension
from chainer.training.extensions import util
from chainer.utils import argument
class Evaluator(extension.Extension):
"""__init__(self, iterator, target, converter=convert.concat_examples, \
device=None, eval_hook=None, eval_func=None, *, progress_bar=False)
Trainer extension to evaluate models on a validation set.
This extension evaluates the current models by a given evaluation function.
It creates a :class:`~chainer.Reporter` object to store values observed in
the evaluation function on each iteration. The report for all iterations
are aggregated to :class:`~chainer.DictSummary`. The collected mean values
are further reported to the reporter object of the trainer, where the name
of each observation is prefixed by the evaluator name. See
:class:`~chainer.Reporter` for details in naming rules of the reports.
Evaluator has a structure to customize similar to that of
:class:`~chainer.training.updaters.StandardUpdater`.
The main differences are:
- There are no optimizers in an evaluator. Instead, it holds links
to evaluate.
- An evaluation loop function is used instead of an update function.
- Preparation routine can be customized, which is called before each
evaluation. It can be used, e.g., to initialize the state of stateful
recurrent networks.
There are two ways to modify the evaluation behavior besides setting a
custom evaluation function. One is by setting a custom evaluation loop via
the ``eval_func`` argument. The other is by inheriting this class and
overriding the :meth:`evaluate` method. In latter case, users have to
create and handle a reporter object manually. Users also have to copy the
iterators before using them, in order to reuse them at the next time of
evaluation. In both cases, the functions are called in testing mode
(i.e., ``chainer.config.train`` is set to ``False``).
This extension is called at the end of each epoch by default.
Args:
iterator: Dataset iterator for the validation dataset. It can also be
a dictionary of iterators. If this is just an iterator, the
iterator is registered by the name ``'main'``.
target: Link object or a dictionary of links to evaluate. If this is
just a link object, the link is registered by the name ``'main'``.
converter: Converter function to build input arrays.
:func:`~chainer.dataset.concat_examples` is used by default.
device: Device to which the validation data is sent. Negative value
indicates the host memory (CPU).
eval_hook: Function to prepare for each evaluation process. It is
called at the beginning of the evaluation. The evaluator extension
object is passed at each call.
eval_func: Evaluation function called at each iteration. The target
link to evaluate as a callable is used by default.
progress_bar: Boolean flag to show a progress bar while training,
which is similar to
:class:`~chainer.training.extensions.ProgressBar`.
(default: ``False``)
.. warning::
The argument ``progress_bar`` is experimental.
The interface can change in the future.
Attributes:
converter: Converter function.
device: Device to which the validation data is sent.
eval_hook: Function to prepare for each evaluation process.
eval_func: Evaluation function called at each iteration.
"""
trigger = 1, 'epoch'
default_name = 'validation'
priority = extension.PRIORITY_WRITER
name = None
def __init__(self, iterator, target, converter=convert.concat_examples,
device=None, eval_hook=None, eval_func=None, **kwargs):
progress_bar, = argument.parse_kwargs(kwargs, ('progress_bar', False))
if device is not None:
device = backend.get_device(device)
if isinstance(iterator, iterator_module.Iterator):
iterator = {'main': iterator}
self._iterators = iterator
if isinstance(target, link.Link):
target = {'main': target}
self._targets = target
self.converter = converter
self.device = device
self.eval_hook = eval_hook
self.eval_func = eval_func
self._progress_bar = progress_bar
for key, iter in six.iteritems(iterator):
if (isinstance(iter, (iterators.SerialIterator,
iterators.MultiprocessIterator,
iterators.MultithreadIterator)) and
getattr(iter, 'repeat', False)):
msg = 'The `repeat` property of the iterator {} '
'is set to `True`. Typically, the evaluator sweeps '
'over iterators until they stop, '
'but as the property being `True`, this iterator '
'might not stop and evaluation could go into '
'an infinite loop. '
'We recommend to check the configuration '
'of iterators'.format(key)
warnings.warn(msg)
def get_iterator(self, name):
"""Returns the iterator of the given name."""
return self._iterators[name]
def get_all_iterators(self):
"""Returns a dictionary of all iterators."""
return dict(self._iterators)
def get_target(self, name):
"""Returns the target link of the given name."""
return self._targets[name]
def get_all_targets(self):
"""Returns a dictionary of all target links."""
return dict(self._targets)
def __call__(self, trainer=None):
"""Executes the evaluator extension.
Unlike usual extensions, this extension can be executed without passing
a trainer object. This extension reports the performance on validation
dataset using the :func:`~chainer.report` function. Thus, users can use
this extension independently from any trainer by manually configuring
a :class:`~chainer.Reporter` object.
Args:
trainer (~chainer.training.Trainer): Trainer object that invokes
this extension. It can be omitted in case of calling this
extension manually.
Returns:
dict: Result dictionary that contains mean statistics of values
reported by the evaluation function.
"""
# set up a reporter
reporter = reporter_module.Reporter()
if self.name is not None:
prefix = self.name + '/'
else:
prefix = ''
for name, target in six.iteritems(self._targets):
reporter.add_observer(prefix + name, target)
reporter.add_observers(prefix + name,
target.namedlinks(skipself=True))
with reporter:
with configuration.using_config('train', False):
result = self.evaluate()
reporter_module.report(result)
return result
def evaluate(self):
"""Evaluates the model and returns a result dictionary.
This method runs the evaluation loop over the validation dataset. It
accumulates the reported values to :class:`~chainer.DictSummary` and
returns a dictionary whose values are means computed by the summary.
Note that this function assumes that the main iterator raises
``StopIteration`` or code in the evaluation loop raises an exception.
So, if this assumption is not held, the function could be caught in
an infinite loop.
Users can override this method to customize the evaluation routine.
.. note::
This method encloses :attr:`eval_func` calls with
:func:`function.no_backprop_mode` context, so all calculations
using :class:`~chainer.FunctionNode`\\s inside
:attr:`eval_func` do not make computational graphs. It is for
reducing the memory consumption.
Returns:
dict: Result dictionary. This dictionary is further reported via
:func:`~chainer.report` without specifying any observer.
"""
iterator = self._iterators['main']
eval_func = self.eval_func or self._targets['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
warnings.warn(
'This iterator does not have the reset method. Evaluator '
'copies the iterator instead of resetting. This behavior is '
'deprecated. Please implement the reset method.',
DeprecationWarning)
it = copy.copy(iterator)
summary = reporter_module.DictSummary()
if self._progress_bar:
pbar = _IteratorProgressBar(iterator=it)
for batch in it:
observation = {}
with reporter_module.report_scope(observation):
in_arrays = convert._call_converter(
self.converter, batch, self.device)
with function.no_backprop_mode():
if isinstance(in_arrays, tuple):
eval_func(*in_arrays)
elif isinstance(in_arrays, dict):
eval_func(**in_arrays)
else:
eval_func(in_arrays)
summary.add(observation)
if self._progress_bar:
pbar.update()
if self._progress_bar:
pbar.close()
return summary.compute_mean()
def finalize(self):
"""Finalizes the evaluator object.
This method calls the `finalize` method of each iterator that
this evaluator has.
It is called at the end of training loops.
"""
for iterator in six.itervalues(self._iterators):
iterator.finalize()
class _IteratorProgressBar(util.ProgressBar):
def __init__(self, iterator, bar_length=None, out=None):
if not (hasattr(iterator, 'current_position') and
hasattr(iterator, 'epoch_detail')):
raise TypeError('Iterator must have the following attributes '
'to enable a progress bar: '
'current_position, epoch_detail')
self._iterator = iterator
super(_IteratorProgressBar, self).__init__(
bar_length=bar_length, out=out)
def get_lines(self):
iteration = self._iterator.current_position
epoch_detail = self._iterator.epoch_detail
epoch_size = getattr(self._iterator, '_epoch_size', None)
lines = []
rate = epoch_detail
marks = '#' * int(rate * self._bar_length)
lines.append('validation [{}{}] {:6.2%}\n'.format(
marks, '.' * (self._bar_length - len(marks)), rate))
if epoch_size:
lines.append('{:10} / {} iterations\n'
.format(iteration, epoch_size))
else:
lines.append('{:10} iterations\n'.format(iteration))
speed_t, speed_e = self.update_speed(iteration, epoch_detail)
estimated_time = (1.0 - epoch_detail) / speed_e
lines.append('{:10.5g} iters/sec. Estimated time to finish: {}.\n'
.format(speed_t,
datetime.timedelta(seconds=estimated_time)))
return lines
| 11,956
| 38.075163
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/polynomial_shift.py
|
from __future__ import division
import numpy
from chainer.training import extension
class PolynomialShift(extension.Extension):
"""Trainer extension to polynomially shift an optimizer attribute.
This extension polynomially decreases the specified attribute of the
optimizer. The typical use case is a polynomial decay of the
learning rate at each iteration.
For example, suppose that this extension is invoke at every iteration.
Then this extension will set the corresponding attribute to
``init_value * (1 - i / max_iter) ^ rate`` at the ``i``-th iteration, where
the ``max_iter`` is the number of iterations to be running.
This extension is also called before the training loop starts by default.
Args:
attr (str): Name of the attribute to shift.
rate (float): Exponent of polynomial shift.
max_count (int): Number of this extension to be invoked.
init (float): Initial value of the attribute. If it is ``None``, the
extension extracts the attribute at the first call and uses it as
the initial value.
target (float): Target value of the attribute. If the attribute reaches
this value, the shift stops.
optimizer (~chainer.Optimizer): Target optimizer to adjust the
attribute. If it is ``None``, the main optimizer of the updater is
used.
"""
invoke_before_training = True
def __init__(self, attr, rate, max_count, init=None, target=None,
optimizer=None):
self._attr = attr
self._rate = rate
self._init = init
self._target = target
self._optimizer = optimizer
self._t = 0
self._max_count = max_count
self._last_value = None
def initialize(self, trainer):
optimizer = self._get_optimizer(trainer)
# ensure that _init is set
if self._init is None:
self._init = getattr(optimizer, self._attr)
if self._last_value is not None: # resuming from a snapshot
self._update_value(optimizer, self._last_value)
else:
self._update_value(optimizer, self._init)
def __call__(self, trainer):
self._t += 1
optimizer = self._get_optimizer(trainer)
decay = max(1 - self._t / self._max_count, 0)
value = self._init * decay ** self._rate
if self._target is not None:
if self._rate > 0:
# almost same as value = min(value, self._target), but this
# line supports negative values, too
if self._target / value > 1:
value = self._target
else:
# ditto
if self._target / value < 1:
value = self._target
self._update_value(optimizer, value)
def serialize(self, serializer):
self._t = serializer('_t', self._t)
self._last_value = serializer('_last_value', self._last_value)
if isinstance(self._last_value, numpy.ndarray):
self._last_value = self._last_value.item()
def _get_optimizer(self, trainer):
return self._optimizer or trainer.updater.get_optimizer('main')
def _update_value(self, optimizer, value):
setattr(optimizer, self._attr, value)
self._last_value = value
| 3,354
| 34.315789
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/micro_average.py
|
from chainer import reporter
from chainer.training import extension
from chainer.training import util
class MicroAverage(extension.Extension):
"""Calculates micro-average ratio.
Give :math:`N` batches and values :math:`\\{n_1, \\dots, n_N\\}` and
:math:`\\{d_1, \\dots, d_N\\}`, this extension calculates micro-average of
these ratio defined as:
.. math::
\\frac{\\sum_i^N n_i}{\\sum_i^N d_i}.
A user usually uses the number of examples which a system correctly
predict as :math:`n_i` and the number of total examples in :math:`i`-th
batch as :math:`d_i`. This value is called macro-average of precision.
Note that macro-average is defined as:
.. math::
\\frac{1}{N}\\sum_i^N (n_i / d_i),
It is same to the micro-average when each mini-batch has the same
:math:`d_i`.
You need to report numerator value (the number of correct examples) and
denominator value (the number of examples) in your model.
>>> class MyModel(chainer.Link):
... def __call__(self, x, y):
... loss = F.softmax_cross_entropy(x, y)
... correct = (x.data.argmax(axis=1) == y.data).sum()
... total = len(y.data)
... reporter.report({'correct': correct, 'total': total}, self)
... return loss
And then, make an extension with corresponding reporting keys and
register it.
>>> ext = extensions.MicroAverage(
... 'main/correct', 'main/total', 'main/accuracy')
Args:
numerator_key (str): Key string of obserbation storing a numerator
value.
denominator_key (str): Key string of obserbation storing a denominator
value.
result_key (str): Key string of obserbation to store a result.
trigger: Trigger that decides when to calcurate average.
This is distinct from the trigger of this extension itself.
If it is a tuple in the form ``<int>, 'epoch'`` or
``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`.
"""
priority = extension.PRIORITY_EDITOR
def __init__(
self, numerator_key, denominator_key, result_key,
trigger=(1, 'epoch')):
self._trigger = util.get_trigger(trigger)
self._numerator_key = numerator_key
self._denominator_key = denominator_key
self._result_key = result_key
self._numerator = 0
self._denominator = 0
def __call__(self, trainer):
observation = trainer.observation
if not (self._numerator_key in observation and
self._denominator_key in observation):
return
self._numerator += observation[self._numerator_key]
self._denominator += observation[self._denominator_key]
if self._trigger(trainer):
result = float(self._numerator) / self._denominator
self._numerator = 0
self._denominator = 0
reporter.report({self._result_key: result})
def serialize(self, serializer):
self._numerator = serializer('_numerator', self._numerator)
self._denominator = serializer('_denominator', self._denominator)
| 3,192
| 33.706522
| 78
|
py
|
chainer
|
chainer-master/chainer/training/extensions/util.py
|
import collections
import os
import sys
import time
if os.name == 'nt':
import ctypes
_STD_OUTPUT_HANDLE = -11
class _COORD(ctypes.Structure):
_fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class _SMALL_RECT(ctypes.Structure):
_fields_ = [('Left', ctypes.c_short), ('Top', ctypes.c_short),
('Right', ctypes.c_short), ('Bottom', ctypes.c_short)]
class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [('dwSize', _COORD), ('dwCursorPosition', _COORD),
('wAttributes', ctypes.c_ushort),
('srWindow', _SMALL_RECT),
('dwMaximumWindowSize', _COORD)]
def set_console_cursor_position(x, y):
"""Set relative cursor position from current position to (x,y)"""
whnd = ctypes.windll.kernel32.GetStdHandle(_STD_OUTPUT_HANDLE)
csbi = _CONSOLE_SCREEN_BUFFER_INFO()
ctypes.windll.kernel32.GetConsoleScreenBufferInfo(whnd,
ctypes.byref(csbi))
cur_pos = csbi.dwCursorPosition
pos = _COORD(cur_pos.X + x, cur_pos.Y + y)
ctypes.windll.kernel32.SetConsoleCursorPosition(whnd, pos)
def erase_console(x, y, mode=0):
"""Erase screen.
Mode=0: From (x,y) position down to the bottom of the screen.
Mode=1: From (x,y) position down to the beginning of line.
Mode=2: Hole screen
"""
whnd = ctypes.windll.kernel32.GetStdHandle(_STD_OUTPUT_HANDLE)
csbi = _CONSOLE_SCREEN_BUFFER_INFO()
ctypes.windll.kernel32.GetConsoleScreenBufferInfo(whnd,
ctypes.byref(csbi))
cur_pos = csbi.dwCursorPosition
wr = ctypes.c_ulong()
if mode == 0:
num = csbi.srWindow.Right * (csbi.srWindow.Bottom -
cur_pos.Y) - cur_pos.X
ctypes.windll.kernel32.FillConsoleOutputCharacterA(
whnd, ord(' '), num, cur_pos, ctypes.byref(wr))
elif mode == 1:
num = cur_pos.X
ctypes.windll.kernel32.FillConsoleOutputCharacterA(
whnd, ord(' '), num, _COORD(0, cur_pos.Y), ctypes.byref(wr))
elif mode == 2:
os.system('cls')
class ProgressBar(object):
def __init__(self, bar_length=None, out=None):
self._bar_length = 50 if bar_length is None else bar_length
self._out = sys.stdout if out is None else out
self._recent_timing = collections.deque([], maxlen=100)
def update_speed(self, iteration, epoch_detail):
now = time.time()
self._recent_timing.append((iteration, epoch_detail, now))
old_t, old_e, old_sec = self._recent_timing[0]
span = now - old_sec
if span != 0:
speed_t = (iteration - old_t) / span
speed_e = (epoch_detail - old_e) / span
else:
speed_t = float('inf')
speed_e = float('inf')
return speed_t, speed_e
def get_lines(self):
raise NotImplementedError
def update(self):
self.erase_console()
lines = self.get_lines()
for line in lines:
self._out.write(line)
self.move_cursor_up(len(lines))
self.flush()
def close(self):
self.erase_console()
self.flush()
def erase_console(self):
if os.name == 'nt':
erase_console(0, 0)
else:
self._out.write('\033[J')
def move_cursor_up(self, n):
# move the cursor to the head of the progress bar
if os.name == 'nt':
set_console_cursor_position(0, - n)
else:
self._out.write('\033[{:d}A'.format(n))
def flush(self):
if hasattr(self._out, 'flush'):
self._out.flush()
| 3,869
| 32.947368
| 77
|
py
|
chainer
|
chainer-master/chainer/training/extensions/variable_unchain.py
|
import six
from chainer import configuration
from chainer.training import extension
from chainer import variable
class unchain_variables(extension.Extension):
"""Trainer extension to unchain all comptational graphs.
This extenstion unchains all comptational graphs after all extensions are
run to release memory and to avoid memory leak.
This extension can be used as a last resort when there is an extension that
use a variable graph and cannot release the graph in itself.
It observes the previous ``chainer.config.keep_graph_on_report`` flag.
The extension is triggered when the flag is turned on.
"""
priority = 0
def __init__(self):
self._prev_flag = None
def initialize(self, _):
self._prev_flag = configuration.config.keep_graph_on_report
def trigger(self, _):
flag = self._prev_flag
self._prev_flag = configuration.config.keep_graph_on_report
return flag
def __call__(self, trainer):
for var in six.itervalues(trainer.observation):
if isinstance(var, variable.Variable):
var.unchain_backward()
| 1,139
| 30.666667
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/value_observation.py
|
from chainer.training import extension
def observe_value(observation_key, target_func):
"""Returns a trainer extension to continuously record a value.
Args:
observation_key (str): Key of observation to record.
target_func (function): Function that returns the value to record.
It must take one argument: :class:~chainer.training.Trainer object.
Returns:
The extension function.
This extension is triggered each epoch by default.
To change this, use the ``trigger`` argument with the
:meth:`Trainer.extend() <chainer.training.Trainer.extend>` method.
"""
@extension.make_extension(
trigger=(1, 'epoch'), priority=extension.PRIORITY_WRITER)
def _observe_value(trainer):
trainer.observation[observation_key] = target_func(trainer)
return _observe_value
def observe_lr(optimizer_name='main', observation_key='lr'):
"""Returns a trainer extension to record the learning rate.
Args:
optimizer_name (str): Name of optimizer whose learning rate is
recorded.
observation_key (str): Key of observation to record.
Returns:
The extension function.
This extension is triggered each epoch by default.
To change this, use the ``trigger`` argument with the
:meth:`Trainer.extend() <chainer.training.Trainer.extend>` method.
"""
return observe_value(
observation_key,
lambda trainer: trainer.updater.get_optimizer(optimizer_name).lr)
| 1,504
| 32.444444
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/parameter_statistics.py
|
import numpy
import six
import chainer
from chainer import backend
from chainer import reporter
from chainer.training import extension
from chainer.training import trigger as trigger_module
_default_statistics = {
'mean': lambda x: backend.get_array_module(x).mean(x),
'std': lambda x: backend.get_array_module(x).std(x),
'min': lambda x: backend.get_array_module(x).min(x),
'max': lambda x: backend.get_array_module(x).max(x),
'zeros': lambda x: backend.get_array_module(x).count_nonzero(x == 0),
'percentile': lambda x: backend.get_array_module(x).percentile(
x, (0.13, 2.28, 15.87, 50, 84.13, 97.72, 99.87))
}
class ParameterStatistics(extension.Extension):
"""Trainer extension to report parameter statistics.
Statistics are collected and reported for a given :class:`~chainer.Link`
or an iterable of :class:`~chainer.Link`\\ s. If a link contains child
links, the statistics are reported separately for each child.
Any function that takes a one-dimensional :class:`numpy.ndarray` or a
:class:`cupy.ndarray` and outputs a single or multiple real numbers can be
registered to handle the collection of statistics, e.g.
:meth:`numpy.ndarray.mean`.
The keys of reported statistics follow the convention of link name
followed by parameter name, attribute name and function name, e.g.
``VGG16Layers/conv1_1/W/data/mean``. They are prepended with an optional
prefix and appended with integer indices if the statistics generating
function return multiple values.
Args:
links (~chainer.Link or iterable of ~chainer.Link): Link(s) containing
the parameters to observe. The link is expected to have a ``name``
attribute which is used as a part of the report key.
statistics (dict or 'default'): Dictionary with function name to
function mappings.
The name is a string and is used as a part of the report key. The
function is responsible for generating the statistics.
If the special value ``'default'`` is specified, the default
statistics functions will be used.
report_params (bool): If ``True``, report statistics for parameter
values such as weights and biases.
report_grads (bool): If ``True``, report statistics for parameter
gradients.
prefix (str): Optional prefix to prepend to the report keys.
trigger: Trigger that decides when to aggregate the results and report
the values.
skip_nan_params (bool): If ``True``, statistics are not computed for
parameters including NaNs and a single NaN value is immediately
reported instead. Otherwise, this extension will simply try to
compute the statistics without performing any checks for NaNs.
.. note::
The default statistic functions are as follows:
* ``'mean'`` (``xp.mean(x)``)
* ``'std'`` (``xp.std(x)``)
* ``'min'`` (``xp.min(x)``)
* ``'max'`` (``xp.max(x)``)
* ``'zeros'`` (``xp.count_nonzero(x == 0)``)
* ``'percentile'`` (``xp.percentile(x, \
(0.13, 2.28, 15.87, 50, 84.13, 97.72, 99.87))``)
"""
default_name = 'parameter_statistics'
priority = extension.PRIORITY_WRITER
# prefix ends with a '/' and param_name is preceded by a '/'
report_key_template = ('{prefix}{link_name}{param_name}/{attr_name}/'
'{function_name}')
default_statistics = _default_statistics
def __init__(self, links, statistics='default',
report_params=True, report_grads=True, prefix=None,
trigger=(1, 'epoch'), skip_nan_params=False):
if not isinstance(links, (list, tuple)):
links = links,
self._links = links
if statistics is None:
statistics = {}
elif statistics == 'default':
statistics = self.default_statistics
self._statistics = dict(statistics)
attrs = []
if report_params:
attrs.append('data')
if report_grads:
attrs.append('grad')
self._attrs = attrs
self._prefix = prefix
self._trigger = trigger_module.get_trigger(trigger)
self._summary = reporter.DictSummary()
self._skip_nan_params = skip_nan_params
def __call__(self, trainer):
"""Execute the statistics extension.
Collect statistics for the current state of parameters.
Note that this method will merely update its statistic summary, unless
the internal trigger is fired. If the trigger is fired, the summary
will also be reported and then reset for the next accumulation.
Args:
trainer (~chainer.training.Trainer): Associated trainer that
invoked this extension.
"""
statistics = {}
for link in self._links:
link_name = getattr(link, 'name', 'None')
for param_name, param in link.namedparams():
for attr_name in self._attrs:
for function_name, function in \
six.iteritems(self._statistics):
# Get parameters as a flattened one-dimensional array
# since the statistics function should make no
# assumption about the axes
params = getattr(param, attr_name).ravel()
if (self._skip_nan_params
and (
backend.get_array_module(params).isnan(params)
.any())):
value = numpy.nan
else:
value = function(params)
key = self.report_key_template.format(
prefix=self._prefix + '/' if self._prefix else '',
link_name=link_name,
param_name=param_name,
attr_name=attr_name,
function_name=function_name
)
if (isinstance(value, chainer.get_array_types())
and value.size > 1):
# Append integer indices to the keys if the
# statistic function return multiple values
statistics.update({'{}/{}'.format(key, i): v for
i, v in enumerate(value)})
else:
statistics[key] = value
self._summary.add(statistics)
if self._trigger(trainer):
reporter.report(self._summary.compute_mean())
self._summary = reporter.DictSummary() # Clear summary
def register_statistics(self, name, function):
"""Register a function to compute a certain statistic.
The registered function will be called each time the extension runs and
the results will be included in the report.
Args:
name (str): Name of the statistic.
function: Function to generate the statistic. Any function that
takes a one-dimensional :class:`numpy.ndarray` or a
:class:`cupy.ndarray` and outputs a single or multiple real
numbers is allowed.
"""
self._statistics[name] = function
| 7,507
| 41.179775
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/__init__.py
|
# import classes and functions
from chainer.training.extensions._snapshot import snapshot # NOQA
from chainer.training.extensions._snapshot import snapshot_object # NOQA
from chainer.training.extensions.computational_graph import DumpGraph # NOQA
from chainer.training.extensions.evaluator import Evaluator # NOQA
from chainer.training.extensions.exponential_shift import ExponentialShift # NOQA
from chainer.training.extensions.fail_on_nonnumber import FailOnNonNumber # NOQA
from chainer.training.extensions.inverse_shift import InverseShift # NOQA
from chainer.training.extensions.linear_shift import LinearShift # NOQA
from chainer.training.extensions.log_report import LogReport # NOQA
from chainer.training.extensions.micro_average import MicroAverage # NOQA
from chainer.training.extensions.multistep_shift import MultistepShift # NOQA
from chainer.training.extensions.parameter_statistics import ParameterStatistics # NOQA
from chainer.training.extensions.plot_report import PlotReport # NOQA
from chainer.training.extensions.polynomial_shift import PolynomialShift # NOQA
from chainer.training.extensions.print_report import PrintReport # NOQA
from chainer.training.extensions.progress_bar import ProgressBar # NOQA
from chainer.training.extensions.step_shift import StepShift # NOQA
from chainer.training.extensions.value_observation import observe_lr # NOQA
from chainer.training.extensions.value_observation import observe_value # NOQA
from chainer.training.extensions.variable_statistics_plot import VariableStatisticsPlot # NOQA
from chainer.training.extensions.variable_unchain import unchain_variables # NOQA
from chainer.training.extensions.warmup_shift import WarmupShift # NOQA
# Alias
from chainer.training.extensions.computational_graph import DumpGraph as dump_graph # NOQA
| 1,820
| 66.444444
| 95
|
py
|
chainer
|
chainer-master/chainer/training/extensions/step_shift.py
|
from __future__ import division
import numpy
from chainer.training import extension
class StepShift(extension.Extension):
"""Trainer extension to shift an optimizer attribute in "steps".
This extension multiplies the specified attribute of the optimizer in
"steps". The typical use case is to scale the attribute at every ``k``\\ th
iteration.
For example, suppose that this extension is invoked at every iteration,
then given ``k``, a multiplier ``gamma`` and an initial value
``init``, the optimizer attribute is set to
``init * gamma ^ (floor(i / k))``, where ``i`` represents the index of the
current iteration.
This extension is also called before the training loop starts by default.
Args:
attr (str): Name of the optimizer attribute to adjust.
gamma (float): The multiplier.
step (int): The interval for the multiplication, i.e., ``k``.
init (float): Initial value of the attribute. If it is ``None``, the
extension extracts the attribute at the first call and uses it as
the initial value.
target (float): Target value of the attribute. If the attribute reaches
this value, the shift stops.
optimizer (~chainer.Optimizer): Target optimizer object. If it is None,
the main optimizer of the trainer is used.
"""
def __init__(self, attr, gamma, step, init=None, target=None,
optimizer=None):
self._attr = attr
self._gamma = gamma
self._step = step
self._init = init
self._target = target
self._optimizer = optimizer
self._t = 0
self._last_value = None
def initialize(self, trainer):
optimizer = self._get_optimizer(trainer)
# ensure that _init is set
if self._init is None:
self._init = getattr(optimizer, self._attr)
if self._last_value is not None:
value = self._last_value
else:
value = self._init
self._update_value(optimizer, value)
def __call__(self, trainer):
self._t += 1
optimizer = self._get_optimizer(trainer)
value = self._init * self._gamma ** numpy.floor(self._t / self._step)
if self._target is not None:
if self._gamma > 1:
# almost same as value = min(value, self._target), but this
# line supports negative values, too
if value / self._target > 1:
value = self._target
else:
# ditto
if value / self._target < 1:
value = self._target
self._update_value(optimizer, value)
def serialize(self, serializer):
self._t = serializer('_t', self._t)
self._last_value = serializer('_last_value', self._last_value)
if isinstance(self._last_value, numpy.ndarray):
self._last_value = self._last_value.item()
def _get_optimizer(self, trainer):
return self._optimizer or trainer.updater.get_optimizer('main')
def _update_value(self, optimizer, value):
setattr(optimizer, self._attr, value)
self._last_value = value
| 3,214
| 35.534091
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/log_report.py
|
import json
import os
import shutil
import warnings
import six
from chainer import reporter
from chainer import serializer as serializer_module
from chainer.training import extension
from chainer.training import trigger as trigger_module
from chainer import utils
from chainer.utils import argument
class LogReport(extension.Extension):
"""__init__(\
keys=None, trigger=(1, 'epoch'), postprocess=None, filename='log')
Trainer extension to output the accumulated results to a log file.
This extension accumulates the observations of the trainer to
:class:`~chainer.DictSummary` at a regular interval specified by a supplied
trigger, and writes them into a log file in JSON format.
There are two triggers to handle this extension. One is the trigger to
invoke this extension, which is used to handle the timing of accumulating
the results. It is set to ``1, 'iteration'`` by default. The other is the
trigger to determine when to emit the result. When this trigger returns
True, this extension appends the summary of accumulated values to the list
of past summaries, and writes the list to the log file. Then, this
extension makes a new fresh summary object which is used until the next
time that the trigger fires.
It also adds some entries to each result dictionary.
- ``'epoch'`` and ``'iteration'`` are the epoch and iteration counts at the
output, respectively.
- ``'elapsed_time'`` is the elapsed time in seconds since the training
begins. The value is taken from :attr:`Trainer.elapsed_time`.
Args:
keys (iterable of strs): Keys of values to accumulate. If this is None,
all the values are accumulated and output to the log file.
trigger: Trigger that decides when to aggregate the result and output
the values. This is distinct from the trigger of this extension
itself. If it is a tuple in the form ``<int>, 'epoch'`` or
``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`.
postprocess: Callback to postprocess the result dictionaries. Each
result dictionary is passed to this callback on the output. This
callback can modify the result dictionaries, which are used to
output to the log file.
filename (str): Name of the log file under the output directory. It can
be a format string: the last result dictionary is passed for the
formatting. For example, users can use '{iteration}' to separate
the log files for different iterations. If the log name is None, it
does not output the log to any file.
For historical reasons ``log_name`` is also accepted as an alias
of this argument.
"""
def __init__(self, keys=None, trigger=(1, 'epoch'), postprocess=None,
filename=None, **kwargs):
self._keys = keys
self._trigger = trigger_module.get_trigger(trigger)
self._postprocess = postprocess
self._log = []
log_name, = argument.parse_kwargs(
kwargs, ('log_name', 'log'),
)
if filename is None:
filename = log_name
del log_name # avoid accidental use
self._log_name = filename
self._init_summary()
def __call__(self, trainer):
# accumulate the observations
keys = self._keys
observation = trainer.observation
summary = self._summary
if keys is None:
summary.add(observation)
else:
summary.add({k: observation[k] for k in keys if k in observation})
if trainer.is_before_training or self._trigger(trainer):
# output the result
stats = self._summary.compute_mean()
stats_cpu = {}
for name, value in six.iteritems(stats):
stats_cpu[name] = float(value) # copy to CPU
updater = trainer.updater
stats_cpu['epoch'] = updater.epoch
stats_cpu['iteration'] = updater.iteration
stats_cpu['elapsed_time'] = trainer.elapsed_time
if self._postprocess is not None:
self._postprocess(stats_cpu)
self._log.append(stats_cpu)
# write to the log file
if self._log_name is not None:
log_name = self._log_name.format(**stats_cpu)
with utils.tempdir(prefix=log_name, dir=trainer.out) as tempd:
path = os.path.join(tempd, 'log.json')
with open(path, 'w') as f:
json.dump(self._log, f, indent=4)
new_path = os.path.join(trainer.out, log_name)
shutil.move(path, new_path)
# reset the summary for the next output
self._init_summary()
@property
def log(self):
"""The current list of observation dictionaries."""
return self._log
def serialize(self, serializer):
if hasattr(self._trigger, 'serialize'):
self._trigger.serialize(serializer['_trigger'])
try:
self._summary.serialize(serializer['_summary'])
except KeyError:
warnings.warn('The statistics are not saved.')
# Note that this serialization may lose some information of small
# numerical differences.
if isinstance(serializer, serializer_module.Serializer):
log = json.dumps(self._log)
serializer('_log', log)
else:
log = serializer('_log', '')
self._log = json.loads(log)
def _init_summary(self):
self._summary = reporter.DictSummary()
| 5,706
| 37.560811
| 79
|
py
|
chainer
|
chainer-master/chainer/training/extensions/plot_report.py
|
import json
from os import path
import warnings
import numpy
import six
from chainer import reporter
from chainer import serializer as serializer_module
from chainer.training import extension
from chainer.training import trigger as trigger_module
from chainer.utils import argument
_available = None
def _try_import_matplotlib():
global matplotlib, _available
try:
import matplotlib # NOQA
_available = True
except (ImportError, TypeError):
_available = False
def _check_available():
if _available is None:
_try_import_matplotlib()
if not _available:
warnings.warn('matplotlib is not installed on your environment, '
'so nothing will be plotted at this time. '
'Please install matplotlib to plot figures.\n\n'
' $ pip install matplotlib\n')
class PlotReport(extension.Extension):
"""__init__(\
y_keys, x_key='iteration', trigger=(1, 'epoch'), postprocess=None, \
filename='plot.png', marker='x', grid=True)
Trainer extension to output plots.
This extension accumulates the observations of the trainer to
:class:`~chainer.DictSummary` at a regular interval specified by a supplied
trigger, and plot a graph with using them.
There are two triggers to handle this extension. One is the trigger to
invoke this extension, which is used to handle the timing of accumulating
the results. It is set to ``1, 'iteration'`` by default. The other is the
trigger to determine when to emit the result. When this trigger returns
True, this extension appends the summary of accumulated values to the list
of past summaries, and writes the list to the log file. Then, this
extension makes a new fresh summary object which is used until the next
time that the trigger fires.
It also adds ``'epoch'`` and ``'iteration'`` entries to each result
dictionary, which are the epoch and iteration counts at the output.
.. warning::
If your environment needs to specify a backend of matplotlib
explicitly, please call ``matplotlib.use`` before calling
``trainer.run``. For example:
.. code-block:: python
import matplotlib
matplotlib.use('Agg')
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', filename='loss.png'))
trainer.run()
Then, once one of instances of this extension is called,
``matplotlib.use`` will have no effect.
For the details, please see here:
https://matplotlib.org/faq/usage_faq.html#what-is-a-backend
Args:
y_keys (iterable of strs): Keys of values regarded as y. If this is
``None``, nothing is output to the graph.
x_key (str): Keys of values regarded as x. The default value is
'iteration'.
trigger: Trigger that decides when to aggregate the result and output
the values. This is distinct from the trigger of this extension
itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,
'iteration'``, it is passed to :class:`IntervalTrigger`.
postprocess: Callback to postprocess the result dictionaries. Figure
object, Axes object, and all plot data are passed to this callback
in this order. This callback can modify the figure.
filename (str): Name of the figure file under the output directory.
It can be a format string.
For historical reasons ``file_name`` is also accepted as an alias
of this argument.
marker (str): The marker used to plot the graph. Default is ``'x'``. If
``None`` is given, it draws with no markers.
grid (bool): If ``True``, set the axis grid on.
The default value is ``True``.
"""
def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),
postprocess=None, filename=None, marker='x',
grid=True, **kwargs):
file_name, = argument.parse_kwargs(kwargs, ('file_name', 'plot.png'))
if filename is None:
filename = file_name
del file_name # avoid accidental use
_check_available()
self._x_key = x_key
if isinstance(y_keys, str):
y_keys = (y_keys,)
self._y_keys = y_keys
self._trigger = trigger_module.get_trigger(trigger)
self._file_name = filename
self._marker = marker
self._grid = grid
self._postprocess = postprocess
self._init_summary()
self._data = {k: [] for k in y_keys}
@staticmethod
def available():
_check_available()
return _available
def __call__(self, trainer):
if self.available():
# Dynamically import pyplot to call matplotlib.use()
# after importing chainer.training.extensions
import matplotlib.pyplot as plt
else:
return
keys = self._y_keys
observation = trainer.observation
summary = self._summary
if keys is None:
summary.add(observation)
else:
summary.add({k: observation[k] for k in keys if k in observation})
if trainer.is_before_training or self._trigger(trainer):
stats = self._summary.compute_mean()
stats_cpu = {}
for name, value in six.iteritems(stats):
stats_cpu[name] = float(value) # copy to CPU
updater = trainer.updater
stats_cpu['epoch'] = updater.epoch
stats_cpu['iteration'] = updater.iteration
x = stats_cpu[self._x_key]
data = self._data
for k in keys:
if k in stats_cpu:
data[k].append((x, stats_cpu[k]))
f = plt.figure()
a = f.add_subplot(111)
a.set_xlabel(self._x_key)
if self._grid:
a.grid()
for k in keys:
xy = data[k]
if len(xy) == 0:
continue
xy = numpy.array(xy)
a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)
if a.has_data():
if self._postprocess is not None:
self._postprocess(f, a, summary)
l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
f.savefig(path.join(trainer.out, self._file_name),
bbox_extra_artists=(l,), bbox_inches='tight')
plt.close()
self._init_summary()
def serialize(self, serializer):
if isinstance(serializer, serializer_module.Serializer):
serializer('_plot_{}'.format(self._file_name),
json.dumps(self._data))
else:
self._data = json.loads(
serializer('_plot_{}'.format(self._file_name), ''))
def _init_summary(self):
self._summary = reporter.DictSummary()
| 7,114
| 33.877451
| 79
|
py
|
chainer
|
chainer-master/chainer/training/triggers/interval_trigger.py
|
import warnings
class IntervalTrigger(object):
"""Trigger based on a fixed interval.
This trigger accepts iterations divided by a given interval. There are two
ways to specify the interval: per iterations and epochs. `Iteration` means
the number of updates, while `epoch` means the number of sweeps over the
training dataset. Fractional values are allowed if the interval is a
number of epochs; the trigger uses the `iteration` and `epoch_detail`
attributes defined by the updater.
For the description of triggers, see :func:`~chainer.training.get_trigger`.
Args:
period (int or float): Length of the interval. Must be an integer if
unit is ``'iteration'``.
unit (str): Unit of the length specified by ``period``. It must be
either ``'iteration'`` or ``'epoch'``.
"""
def __init__(self, period, unit):
if unit not in ('epoch', 'iteration'):
raise ValueError(
'Trigger unit must be either \'epoch\' or \'iteration\'.')
self.period = period
self.unit = unit
self._previous_iteration = 0
self._previous_epoch_detail = 0.
# count is kept for backward compatibility
self.count = 0
def __call__(self, trainer):
"""Decides whether the extension should be called on this iteration.
Args:
trainer (Trainer): Trainer object that this trigger is associated
with. The updater associated with this trainer is used to
determine if the trigger should fire.
Returns:
bool: True if the corresponding extension should be invoked in this
iteration.
"""
updater = trainer.updater
if self.unit == 'epoch':
epoch_detail = updater.epoch_detail
previous_epoch_detail = self._previous_epoch_detail
# if previous_epoch_detail is invalid value,
# use the value of updater.
if previous_epoch_detail < 0:
previous_epoch_detail = updater.previous_epoch_detail
# count is kept for backward compatibility
self.count = epoch_detail // self.period
fire = previous_epoch_detail // self.period != \
epoch_detail // self.period
else:
iteration = updater.iteration
previous_iteration = self._previous_iteration
# if previous_iteration is invalid value,
# guess it from current iteration.
if previous_iteration < 0:
previous_iteration = iteration - 1
fire = previous_iteration // self.period != \
iteration // self.period
# save current values
self._previous_iteration = updater.iteration
if hasattr(updater, 'epoch_detail'):
self._previous_epoch_detail = updater.epoch_detail
return fire
def serialize(self, serializer):
try:
self._previous_iteration = serializer(
'previous_iteration', self._previous_iteration)
except KeyError:
warnings.warn(
'The previous value of iteration is not saved. '
'IntervalTrigger guesses it using current iteration. '
'If this trigger is not called at every iteration, '
'it may not work correctly.')
# set a negative value for invalid
self._previous_iteration = -1
try:
self._previous_epoch_detail = serializer(
'previous_epoch_detail', self._previous_epoch_detail)
except KeyError:
warnings.warn(
'The previous value of epoch_detail is not saved. '
'IntervalTrigger uses the value of '
'trainer.updater.previous_epoch_detail. '
'If this trigger is not called at every iteration, '
'it may not work correctly.')
# set a negative value for invalid
self._previous_epoch_detail = -1.
def get_training_length(self):
return (self.period, self.unit)
def __str__(self):
"""Returns a string describing the class and interval
Returns:
str: IntervalTrigger(<period>, '<unit>')
"""
return '{}({}, \'{}\')'.format(
self.__class__.__name__, self.period, self.unit
)
| 4,434
| 34.766129
| 79
|
py
|
chainer
|
chainer-master/chainer/training/triggers/minmax_value_trigger.py
|
from chainer import reporter
from chainer.training import util
class BestValueTrigger(object):
"""Trigger invoked when specific value becomes best.
Args:
key (str): Key of value.
compare (callable): Compare function which takes current best value and
new value and returns whether new value is better than current
best.
trigger: Trigger that decides the comparison interval between current
best value and new value. This must be a tuple in the form of
``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to
:class:`~chainer.training.triggers.IntervalTrigger`.
"""
def __init__(self, key, compare, trigger=(1, 'epoch')):
self._key = key
self._best_value = None
self._interval_trigger = util.get_trigger(trigger)
self._init_summary()
self._compare = compare
def __call__(self, trainer):
"""Decides whether the extension should be called on this iteration.
Args:
trainer (~chainer.training.Trainer): Trainer object that this
trigger is associated with. The ``observation`` of this trainer
is used to determine if the trigger should fire.
Returns:
bool: ``True`` if the corresponding extension should be invoked in
this iteration.
"""
observation = trainer.observation
summary = self._summary
key = self._key
if key in observation:
summary.add({key: observation[key]})
if not self._interval_trigger(trainer):
return False
stats = summary.compute_mean()
value = float(stats[key]) # copy to CPU
self._init_summary()
if self._best_value is None or self._compare(self._best_value, value):
self._best_value = value
return True
return False
def _init_summary(self):
self._summary = reporter.DictSummary()
def serialize(self, serializer):
self._interval_trigger.serialize(serializer['interval_trigger'])
self._summary.serialize(serializer['summary'])
self._best_value = serializer('best_value', self._best_value)
class MaxValueTrigger(BestValueTrigger):
"""Trigger invoked when specific value becomes maximum.
For example you can use this trigger to take snapshot on the epoch the
validation accuracy is maximum.
Args:
key (str): Key of value. The trigger fires when the value associated
with this key becomes maximum.
trigger: Trigger that decides the comparison interval between current
best value and new value. This must be a tuple in the form of
``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to
:class:`~chainer.training.triggers.IntervalTrigger`.
"""
def __init__(self, key, trigger=(1, 'epoch')):
super(MaxValueTrigger, self).__init__(
key, lambda max_value, new_value: new_value > max_value, trigger)
class MinValueTrigger(BestValueTrigger):
"""Trigger invoked when specific value becomes minimum.
For example you can use this trigger to take snapshot on the epoch the
validation loss is minimum.
Args:
key (str): Key of value. The trigger fires when the value associated
with this key becomes minimum.
trigger: Trigger that decides the comparison interval between current
best value and new value. This must be a tuple in the form of
``<int>, 'epoch'`` or ``<int>, 'iteration'`` which is passed to
:class:`~chainer.training.triggers.IntervalTrigger`.
"""
def __init__(self, key, trigger=(1, 'epoch')):
super(MinValueTrigger, self).__init__(
key, lambda min_value, new_value: new_value < min_value, trigger)
| 3,888
| 34.036036
| 79
|
py
|
chainer
|
chainer-master/chainer/training/triggers/manual_schedule_trigger.py
|
import warnings
class ManualScheduleTrigger(object):
"""Trigger invoked at specified point(s) of iterations or epochs.
This trigger accepts iterations or epochs indicated by given point(s).
There are two ways to specify the point(s): iteration and epoch.
``iteration`` means the number of updates, while ``epoch`` means the number
of sweeps over the training dataset. Fractional values are allowed
if the point is a number of epochs; the trigger uses the ``iteration``
and ``epoch_detail`` attributes defined by the updater.
Args:
points (int, float, or list of int or float): time of the trigger.
Must be an integer or list of integer if unit is ``'iteration'``.
unit (str): Unit of the time specified by ``points``. It must be
either ``'iteration'`` or ``'epoch'``.
Attributes:
finished (bool): Flag that indicates whether or not this trigger will
fire in the future. This flag is used to determine if the extension
should be initialized after resume.
"""
def __init__(self, points, unit):
if unit not in ('epoch', 'iteration'):
raise ValueError(
'Trigger unit must be either \'epoch\' or \'iteration\'.')
self.points = (points if isinstance(points, list) else [points])
self.unit = unit
self.finished = False
self._previous_iteration = 0
self._previous_epoch_detail = 0.
def __call__(self, trainer):
"""Decides whether the extension should be called on this iteration.
Args:
trainer (Trainer): Trainer object that this trigger is associated
with. The updater associated with this trainer is used to
determine if the trigger should fire.
Returns:
bool: True if the corresponding extension should be invoked in this
iteration.
"""
updater = trainer.updater
if self.unit == 'epoch':
epoch_detail = updater.epoch_detail
previous_epoch_detail = self._previous_epoch_detail
# if previous_epoch_detail is invalid value,
# use the value of updater.
if previous_epoch_detail < 0:
previous_epoch_detail = updater.previous_epoch_detail
fire = any(
previous_epoch_detail < p <= epoch_detail
for p in self.points)
if hasattr(self, '_finished_is_tmp'):
del self._finished_is_tmp
if epoch_detail >= max(self.points):
self.finished = True
if fire and epoch_detail >= max(self.points):
self.finished = True
else:
iteration = updater.iteration
previous_iteration = self._previous_iteration
# if previous_iteration is invalid value,
# guess it from current iteration.
if previous_iteration < 0:
previous_iteration = iteration - 1
fire = any(
previous_iteration < p <= iteration
for p in self.points)
if hasattr(self, '_finished_is_tmp'):
del self._finished_is_tmp
if iteration >= max(self.points):
self.finished = True
if fire and iteration >= max(self.points):
self.finished = True
# save current values
self._previous_iteration = updater.iteration
if hasattr(updater, 'epoch_detail'):
self._previous_epoch_detail = updater.epoch_detail
return fire
def serialize(self, serializer):
try:
self._previous_iteration = serializer(
'previous_iteration', self._previous_iteration)
except KeyError:
warnings.warn(
'The previous value of iteration is not saved. '
'ManualScheduleTrigger guesses it using current iteration. '
'If this trigger is not called at every iteration, '
'it may not work correctly.')
# set a negative value for invalid
self._previous_iteration = -1
try:
self._previous_epoch_detail = serializer(
'previous_epoch_detail', self._previous_epoch_detail)
except KeyError:
warnings.warn(
'The previous value of epoch_detail is not saved. '
'ManualScheduleTrigger uses the value of '
'trainer.updater.previous_epoch_detail. '
'If this trigger is not called at every iteration, '
'it may not work correctly.')
# set a negative value for invalid
self._previous_epoch_detail = -1.
try:
self.finished = serializer('finished', self.finished)
except KeyError:
warnings.warn(
'The flag of finished is not saved. '
'ManualScheduleTrigger set the flag to `False` to force '
'initialization and reset in next `__call__`.')
# set False to force initialization.
self.finished = False
self._finished_is_tmp = True
| 5,231
| 37.470588
| 79
|
py
|
chainer
|
chainer-master/chainer/training/triggers/once_trigger.py
|
import warnings
class OnceTrigger(object):
"""Trigger based on the starting point of the iteration.
This trigger accepts only once at starting point of the iteration. There
are two ways to specify the starting point: only starting point in whole
iteration or called again when training resumed.
Args:
call_on_resume (bool): Whether the extension is called again or not
when restored from a snapshot. It is set to ``False`` by default.
Attributes:
finished (bool): Flag that indicates whether or not this trigger will
fire in the future. This flag is used to determine if the extension
should be initialized after resume.
"""
def __init__(self, call_on_resume=False):
self._flag_first = True
self._flag_resumed = call_on_resume
@property
def finished(self):
return not (self._flag_first or self._flag_resumed)
def __call__(self, trainer):
fire = not self.finished
self._flag_resumed = False
self._flag_first = False
return fire
def serialize(self, serializer):
try:
self._flag_first = serializer('_flag_first', self._flag_first)
except KeyError:
warnings.warn(
'The flag is not saved.'
'OnceTrigger guess it is not first when resumed. '
'If this trigger is resumed before first called, '
'it may not work correctly.')
self._flag_first = False
| 1,517
| 31.297872
| 77
|
py
|
chainer
|
chainer-master/chainer/training/triggers/__init__.py
|
# import classes and functions
from chainer.training.triggers.early_stopping_trigger import EarlyStoppingTrigger # NOQA
from chainer.training.triggers.interval_trigger import IntervalTrigger # NOQA
from chainer.training.triggers.manual_schedule_trigger import ManualScheduleTrigger # NOQA
from chainer.training.triggers.minmax_value_trigger import BestValueTrigger # NOQA
from chainer.training.triggers.minmax_value_trigger import MaxValueTrigger # NOQA
from chainer.training.triggers.minmax_value_trigger import MinValueTrigger # NOQA
from chainer.training.triggers.once_trigger import OnceTrigger # NOQA
from chainer.training.triggers.time_trigger import TimeTrigger # NOQA
| 684
| 67.5
| 91
|
py
|
chainer
|
chainer-master/chainer/training/triggers/early_stopping_trigger.py
|
import operator
import warnings
from chainer import reporter
from chainer.training import util
from chainer.utils import argument
class EarlyStoppingTrigger(object):
"""__init__(\
self, check_trigger=(1, 'epoch'), monitor='main/loss', \
patience=3, mode='auto', verbose=False, \
max_trigger=(100, 'epoch'))
Trigger for Early Stopping
It can be used as a stop trigger of :class:`~chainer.training.Trainer`
to realize *early stopping* technique.
This trigger works as follows.
Within each *check interval* defined by the ``check_trigger`` argument,
it monitors and accumulates the reported value at each iteration.
At the end of each interval, it computes the mean of the accumulated
values and compares it to the previous ones to maintain the *best* value.
When it finds that the best value is not updated
for some periods (defined by ``patience``), this trigger fires.
Args:
monitor (str) : The metric you want to monitor
check_trigger: Trigger that decides the comparison
interval between current best value and new value.
This must be a tuple in the form of ``<int>,
'epoch'`` or ``<int>, 'iteration'`` which is passed to
:class:`~chainer.training.triggers.IntervalTrigger`.
patience (int) : Counts to let the trigger be patient.
The trigger will not fire until the condition is met
for successive ``patience`` checks.
mode (str) : ``'max'``, ``'min'``, or ``'auto'``.
It is used to determine how to compare the monitored values.
verbose (bool) : Enable verbose output.
If verbose is true, you can get more information
max_trigger: Upper bound of the number of training loops
.. note::
``patients`` is also available as an alias of ``patience`` for
historical reason.
"""
def __init__(self, check_trigger=(1, 'epoch'), monitor='main/loss',
patience=None, mode='auto', verbose=False,
max_trigger=(100, 'epoch'), **kwargs):
# `patients` as an alias of `patience`
patients, = argument.parse_kwargs(kwargs, ('patients', None))
if patients is None:
if patience is None:
patience = 3
else:
pass
else:
if patience is None:
patience = patients
else:
raise TypeError(
'Both \'patience\' and \'patients\' arguments are '
'specified. \'patients\' is an alias of the former. '
'Specify only \'patience\'.')
self.count = 0
self.patience = patience
self.monitor = monitor
self.verbose = verbose
self.already_warning = False
self._max_trigger = util.get_trigger(max_trigger)
self._interval_trigger = util.get_trigger(check_trigger)
self._init_summary()
if mode == 'max':
self._compare = operator.gt
elif mode == 'min':
self._compare = operator.lt
else:
if 'accuracy' in monitor:
self._compare = operator.gt
else:
self._compare = operator.lt
if self._compare == operator.gt:
if verbose:
print('early stopping: operator is greater')
self.best = float('-inf')
else:
if verbose:
print('early stopping: operator is less')
self.best = float('inf')
def __call__(self, trainer):
"""Decides whether the training loop should be stopped.
Args:
trainer (~chainer.training.Trainer): Trainer object that this
trigger is associated with. The ``observation`` of this trainer
is used to determine if the trigger should fire.
Returns:
bool: ``True`` if the training loop should be stopped.
"""
observation = trainer.observation
summary = self._summary
if self.monitor in observation:
summary.add({self.monitor: observation[self.monitor]})
if self._max_trigger(trainer):
return True
if not self._interval_trigger(trainer):
return False
if self.monitor not in observation.keys():
warnings.warn('{} is not in observation'.format(self.monitor))
return False
stat = self._summary.compute_mean()
current_val = stat[self.monitor]
self._init_summary()
if self._compare(current_val, self.best):
self.best = current_val
self.count = 0
else:
self.count += 1
if self._stop_condition():
if self.verbose:
print('Epoch {}: early stopping'.format(trainer.updater.epoch))
return True
return False
def _stop_condition(self):
return self.count >= self.patience
def _init_summary(self):
self._summary = reporter.DictSummary()
def get_training_length(self):
return self._max_trigger.get_training_length()
| 5,200
| 32.127389
| 79
|
py
|
chainer
|
chainer-master/chainer/training/triggers/time_trigger.py
|
class TimeTrigger(object):
"""Trigger based on a fixed time interval.
This trigger accepts iterations with a given interval time.
Args:
period (float): Interval time. It is given in seconds.
"""
def __init__(self, period):
self._period = period
self._next_time = self._period
def __call__(self, trainer):
if self._next_time < trainer.elapsed_time:
self._next_time += self._period
return True
else:
return False
def serialize(self, serializer):
self._next_time = serializer('next_time', self._next_time)
| 622
| 23.92
| 66
|
py
|
chainer
|
chainer-master/chainer/backends/intel64.py
|
from __future__ import absolute_import
import numpy
import chainer
from chainer import _backend
from chainer.backends import _cpu
from chainer.configuration import config
_ideep_version = None
_error = None
try:
import ideep4py as ideep # NOQA
from ideep4py import mdarray # type: ignore # NOQA
_ideep_version = 2 if hasattr(ideep, '__version__') else 1
except ImportError as e:
_error = e
_ideep_version = None
class mdarray(object): # type: ignore
pass # for type testing
class Intel64Device(_backend.Device):
"""Device for Intel64 (Intel Architecture) backend with iDeep"""
xp = numpy
name = '@intel64'
supported_array_types = (numpy.ndarray, mdarray)
__hash__ = _backend.Device.__hash__
def __init__(self):
check_ideep_available()
super(Intel64Device, self).__init__()
@staticmethod
def from_array(array):
if isinstance(array, mdarray):
return Intel64Device()
return None
def __eq__(self, other):
return isinstance(other, Intel64Device)
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def send_array(self, array):
if isinstance(array, ideep.mdarray):
return array
if not isinstance(array, numpy.ndarray):
array = _cpu._to_cpu(array) # to numpy.ndarray
if (isinstance(array, numpy.ndarray) and
array.ndim in (1, 2, 4) and
0 not in array.shape):
# TODO(kmaehashi): Remove ndim validation once iDeep has fixed.
# Currently iDeep only supports (1, 2, 4)-dim arrays.
# Note that array returned from `ideep.array` may not be an
# iDeep mdarray, e.g., when the dtype is not float32.
array = ideep.array(array, itype=ideep.wgt_array)
return array
def is_array_supported(self, array):
return isinstance(array, (numpy.ndarray, mdarray))
# ------------------------------------------------------------------------------
# ideep configuration
# ------------------------------------------------------------------------------
_SHOULD_USE_IDEEP = {
'==always': {'always': True, 'auto': False, 'never': False},
'>=auto': {'always': True, 'auto': True, 'never': False},
}
def is_ideep_available():
"""Returns if iDeep is available.
Returns:
bool: ``True`` if the supported version of iDeep is installed.
"""
return _ideep_version is not None and _ideep_version == 2
def check_ideep_available():
"""Checks if iDeep is available.
When iDeep is correctly set up, nothing happens.
Otherwise it raises ``RuntimeError``.
"""
if _ideep_version is None:
# If the error is missing shared object, append a message to
# redirect to the ideep website.
msg = str(_error)
if 'cannot open shared object file' in msg:
msg += ('\n\nEnsure iDeep requirements are satisfied: '
'https://github.com/intel/ideep')
raise RuntimeError(
'iDeep is not available.\n'
'Reason: {}: {}'.format(type(_error).__name__, msg))
elif _ideep_version != 2:
raise RuntimeError(
'iDeep is not available.\n'
'Reason: Unsupported iDeep version ({})'.format(_ideep_version))
def should_use_ideep(level):
"""Determines if we should use iDeep.
This function checks ``chainer.config.use_ideep`` and availability
of ``ideep4py`` package.
Args:
level (str): iDeep use level. It must be either ``'==always'`` or
``'>=auto'``. ``'==always'`` indicates that the ``use_ideep``
config must be ``'always'`` to use iDeep.
Returns:
bool: ``True`` if the caller should use iDeep.
"""
if not is_ideep_available():
return False
# TODO(niboshi):
# Add lowest_version argument and compare with ideep version.
# Currently ideep does not provide a way to retrieve its version.
if level not in _SHOULD_USE_IDEEP:
raise ValueError('invalid iDeep use level: %s '
'(must be either of "==always" or ">=auto")' %
repr(level))
flags = _SHOULD_USE_IDEEP[level]
use_ideep = config.use_ideep
if use_ideep not in flags:
raise ValueError('invalid use_ideep configuration: %s '
'(must be either of "always", "auto", or "never")' %
repr(use_ideep))
return flags[use_ideep]
def inputs_all_ready(inputs, supported_ndim=(2, 4)):
"""Checks if input arrays are supported for an iDeep primitive.
Before calling an iDeep primitive (e.g., ``ideep4py.linear.Forward``), you
need to make sure that all input arrays are ready for the primitive by
calling this function.
Information to be checked includes array types, dimesions and data types.
The function checks ``inputs`` info and ``supported_ndim``.
Inputs to be tested can be any of ``Variable``, ``numpy.ndarray`` or
``ideep4py.mdarray``. However, all inputs to iDeep primitives must be
``ideep4py.mdarray``. Callers of iDeep primitives are responsible of
converting all inputs to ``ideep4py.mdarray``.
Args:
inputs (sequence of arrays or variables):
Inputs to be checked.
supported_ndim (tuple of ints):
Supported ndim values for the iDeep primitive.
Returns:
bool: ``True`` if all conditions meet.
"""
def _is_supported_array_type(a):
return isinstance(a, ideep.mdarray) or ideep.check_type([a])
if not is_ideep_available():
return False
inputs = [x.data if isinstance(x, chainer.variable.Variable)
else x for x in inputs]
return (ideep.check_ndim(inputs, supported_ndim)
and all([_is_supported_array_type(a) for a in inputs]))
| 5,920
| 30.833333
| 80
|
py
|
chainer
|
chainer-master/chainer/backends/cuda.py
|
"""Device, context and memory management on CuPy.
.. note::
The package ``chainer.cuda`` has been renamed to
:mod:`chainer.backends.cuda` as of v4.0.0, but the previous module path
``chainer.cuda`` is also available.
Chainer uses `CuPy <https://cupy.chainer.org/>`_ (with very thin wrapper)
to exploit the speed of GPU computation. Following modules and classes defined
in CuPy are imported to :mod:`chainer.backends.cuda` module for convenience
(refer to this table when reading chainer's source codes).
===================================== =================================
imported name original name
===================================== =================================
``chainer.backends.cuda.cupy`` :mod:`cupy`
``chainer.backends.cuda.cupyx`` :mod:`cupyx`
``chainer.backends.cuda.ndarray`` :class:`cupy.ndarray`
``chainer.backends.cuda.cupy.cuda`` :mod:`cupy.cuda`
``chainer.backends.cuda.Device`` :class:`cupy.cuda.Device`
``chainer.backends.cuda.Event`` :class:`cupy.cuda.Event`
``chainer.backends.cuda.Stream`` :class:`cupy.cuda.Stream`
===================================== =================================
Chainer replaces the default allocator of CuPy by its memory pool
implementation. It enables us to reuse the device memory over multiple
forward/backward computations, and temporary arrays for consecutive elementwise
operations.
"""
import binascii
import functools
import itertools
import os
import threading
import time
import typing as tp # NOQA
import warnings
import numpy
import six
import chainer
from chainer import _backend
from chainer.backends import _cpu
from chainer.backends import intel64
from chainer.configuration import config
from chainer import types # NOQA
import chainerx
available = False # type: bool
cudnn_enabled = False # type: bool
_cupy_major = 0 # type: bool
try:
import cupy
from cupy import cuda # NOQA
from cupy.cuda import cublas # NOQA
import cupyx # NOQA
import cupyx.scipy.linalg # NOQA
import cupyx.scipy.special # NOQA
from cupy import ndarray # type: ignore # NOQA
from cupy.cuda import Device # type: ignore # NOQA
from cupy.cuda import Event # type: ignore # NOQA
from cupy.cuda import Stream # type: ignore # NOQA
# Alias for ignoring the warning in setup.cfg
try:
from cupy._util import PerformanceWarning as _PerformanceWarning # NOQA
except ImportError:
from cupy.util import PerformanceWarning as _PerformanceWarning # NOQA
available = True
except Exception as e:
_resolution_error = e
class ndarray(object): # type: ignore # for type testing
@property
def shape(self) -> types.Shape:
pass
@property
def device(self) -> 'Device':
pass
def get(self, stream: tp.Optional['Stream'] = None) -> numpy.ndarray:
pass
def set(
self,
arr: numpy.ndarray,
stream: tp.Optional['Stream'] = None
) -> None:
pass
class Device(object): # type: ignore # for type testing
def __init__(self, device: tp.Optional[int] = None) -> None:
pass
def __enter__(self) -> 'Device':
pass
def __exit__(self, *args: tp.Any) -> None:
pass
class Event(object): # type: ignore # for type testing
pass
class Stream(object): # type: ignore # for type testing
pass
# for `xp is chainer.backends.cuda.cupy` to always work
cupy = object()
# Dummy class for ignoring cupy.util.PerformanceWarning in setup.cfg.
class _PerformanceWarning(Warning):
pass
if available:
_cupy_major = int(cupy.__version__.split('.')[0])
_cudnn_disabled_by_user = int(os.environ.get('CHAINER_CUDNN', '1')) == 0
try:
if 7 < _cupy_major:
import cupy.cuda.cudnn
import cupy.cudnn
cudnn = cupy.cudnn # type: tp.Optional[types.ModuleType]
libcudnn = cupy.cuda.cudnn # type: tp.Any # NOQA
cudnn_enabled = not _cudnn_disabled_by_user
except Exception as e:
_resolution_error = e
# for `chainer.backends.cuda.libcudnn` to always work
libcudnn = object()
def check_cuda_available():
"""Checks if CUDA is available.
When CUDA is correctly set up, nothing happens.
Otherwise it raises ``RuntimeError``.
"""
if not available:
msg = ('CUDA environment is not correctly set up\n'
'(see https://github.com/chainer/chainer#installation).')
msg += str(_resolution_error)
raise RuntimeError(msg)
if (not cudnn_enabled and
not _cudnn_disabled_by_user and
not getattr(check_cuda_available, '_already_warned', False)):
warnings.warn(
'cuDNN is not enabled.\n'
'Please reinstall CuPy after you install cudnn\n'
'(see https://docs-cupy.chainer.org/en/stable/install.html'
'#install-cudnn).')
check_cuda_available._already_warned = True
class DummyDeviceType(object):
"""Dummy device class that does nothing with cupy.cuda.Device interface.
This class is used to represent CPU device.
"""
id = -1
def __init__(self):
pass
def __int__(self):
return -1
def __enter__(self):
return self
def __exit__(self, *args):
pass
def use(self):
pass
def synchronize(self):
pass
def __eq__(self, other):
return isinstance(other, DummyDeviceType)
def __ne__(self, other):
return not (self == other)
DummyDevice = DummyDeviceType()
# ------------------------------------------------------------------------------
# Global states
# ------------------------------------------------------------------------------
if available:
# This is for backward compatibility
memory_pool = cupy.get_default_memory_pool()
pinned_memory_pool = cupy.get_default_pinned_memory_pool()
_integer_types = six.integer_types + (numpy.integer,)
# ------------------------------------------------------------------------------
# Device
# ------------------------------------------------------------------------------
class GpuDevice(_backend.Device):
"""Device for GPU (CuPy) backend"""
xp = cupy
supported_array_types = (ndarray,)
__hash__ = _backend.Device.__hash__
def __init__(self, device):
check_cuda_available()
assert isinstance(device, Device)
super(GpuDevice, self).__init__()
self.device = device
@staticmethod
def from_device_id(device_id):
"""Returns a :class:`~chainer.backend.GpuDevice` corresponding \
to the CUDA device ID.
"""
check_cuda_available()
if not (isinstance(device_id, _integer_types) and device_id >= 0):
raise ValueError('Invalid CUDA device ID: {}'.format(device_id))
return GpuDevice(Device(device_id))
@staticmethod
def from_array(array):
if isinstance(array, ndarray) and array.device is not None:
return GpuDevice(array.device)
return None
@property
def name(self):
return '@cupy:{}'.format(self.device.id)
def __eq__(self, other):
return isinstance(other, GpuDevice) and other.device == self.device
def __repr__(self):
return '<{} (cupy):{}>'.format(
self.__class__.__name__, self.device.id)
def create_context(self):
# Creates a new cuda.Device instance because a single cuda.Device
# instance cannot be used across threads.
return Device(self.device.id)
def send_array(self, array):
return _array_to_gpu(array, self.device, None)
def use(self):
self.device.use()
def is_array_supported(self, array):
return isinstance(array, ndarray) and self.device == array.device
# ------------------------------------------------------------------------------
# Global states
# ------------------------------------------------------------------------------
def get_device_from_id(device_id: tp.Optional[int]) -> Device:
"""Gets the device from an ID integer.
Args:
device_id (int or None): The ID of the device which this function
returns.
"""
if device_id is not None:
if device_id >= 0:
check_cuda_available()
return Device(int(device_id))
return DummyDevice
def get_device_from_array(*arrays: ndarray) -> Device:
"""Gets the device from a list of CuPy array or a single CuPy array.
.. deprecated:: v6.0.0
This API is deprecated. Please use
:func:`chainer.backend.get_device_from_array` instead.
The device on which the given CuPy array reside is returned.
.. note::
This method only recognizes :class:`cupy.ndarray`\\ s in arguments.
Especially note that, unlike :func:`get_array_module`, this method
does not recognize :class:`~chainer.Variable` objects.
If you need to get device from the :class:`~chainer.Variable` instance
``v``, you need to use ``get_device_from_array(v.array)``.
Args:
arrays (:class:`cupy.ndarray` or list of :class:`cupy.ndarray`):
A CuPy array which this function returns the device corresponding
to. If a list of :class:`cupy.ndarray`\\ s are given, it returns
the first device object of an array in the list.
"""
for array in arrays:
if isinstance(array, ndarray) and array.device is not None:
return array.device
return DummyDevice
def get_device(*args):
"""Gets the device from a device object, an ID integer or an array object.
.. note::
This API is deprecated since v3.0.0. Please use
:func:`~chainer.backends.cuda.get_device_from_id`
or :func:`~chainer.backends.cuda.get_device_from_array` instead.
This is a convenient utility to select a correct device if the type of
``arg`` is unknown (i.e., one can use this function on arrays that may be
on CPU or GPU). The returned device object supports the context management
protocol of Python for the *with* statement.
Args:
args: Values to specify a GPU device. The first device object, integer
or :class:`cupy.ndarray` object is used to select a device.
If it is a device object, it is returned. If it is an integer,
the corresponding device is returned. If it is a CuPy array,
the device on which this array reside is returned. If any
arguments are neither integers nor CuPy arrays, a dummy device
object representing CPU is returned.
Returns:
Device object specified by given ``args``.
.. seealso::
See :class:`cupy.cuda.Device` for the device selection not by arrays.
"""
warnings.warn('get_device is deprecated. Please use get_device_from_id or'
' get_device_from_array instead.', DeprecationWarning)
return _get_cuda_device(*args)
def _get_cuda_device(*args):
# Returns cuda.Device or DummyDevice.
for arg in args:
if type(arg) is not bool and isinstance(arg, _integer_types):
check_cuda_available()
return Device(arg)
if isinstance(arg, ndarray):
if arg.device is None:
continue
return arg.device
if available and isinstance(arg, Device):
return arg
# NOTE: This function returns DummyDevice for both NumPy and ChainerX
return DummyDevice
def _get_device_or_current(
device: tp.Optional[types.CudaDeviceSpec]
) -> Device:
# Returns cuda.Device.
# - If cuda.Device instance, it's returned intact.
# - If None, the current device is returned.
# - If non-negative integer, cuda.Device is returned.
# - Otherwise: error.
if device is None:
return cuda.Device()
if isinstance(device, Device):
return device
if not (isinstance(device, _integer_types) and device >= 0):
raise ValueError('Invalid CUDA device specifier: {}'.format(device))
return cuda.Device(int(device))
# ------------------------------------------------------------------------------
# cupy.ndarray allocation and copy
# ------------------------------------------------------------------------------
def to_gpu(array, device=None, stream=None):
"""Copies the given CPU array to the specified device.
Args:
array (*array*, None, list or tuple):
Array or arrays to be sent to GPU.
device: CUDA device specifier. If ``None`` or :data:`cuda.DummyDevice`,
the arrays will be copied to the current CUDA device.
stream (~cupy.cuda.Stream): *(deprecated since v3.0.0)*
CUDA stream. If not ``None``, the copy runs asynchronously.
Returns:
cupy.ndarray, list or tuple: Array or arrays on GPU.
If some of the arrays are already on GPU, then this function just
returns those arrays without performing any copy.
If input arrays include `None`, it is returned as `None` as is.
"""
if stream is not None:
warnings.warn(
'The stream option is deprecated in chainer.backends.cuda.to_gpu. '
'Please remove it.', DeprecationWarning)
check_cuda_available()
if device is DummyDevice:
device = cuda.Device()
else:
device = _get_device_or_current(device)
return _backend._convert_arrays(
array, lambda arr: _array_to_gpu(arr, device, stream))
def _array_to_gpu(array, device, stream):
if array is None:
return None
if isinstance(array, chainerx.ndarray):
# TODO(niboshi): Update this logic once both CuPy and ChainerX support
# the array interface.
if array.device.backend.name == 'cuda':
# Convert to cupy.ndarray on the same device as source array
array = chainerx._to_cupy(array)
else:
array = chainerx.to_numpy(array)
elif isinstance(array, (numpy.number, numpy.bool_)):
array = numpy.asarray(array)
elif isinstance(array, intel64.mdarray):
array = numpy.asarray(array)
if isinstance(array, ndarray):
if array.device == device:
return array
is_numpy = False
elif isinstance(array, numpy.ndarray):
is_numpy = True
else:
raise TypeError(
'The array sent to gpu must be an array or a NumPy scalar.'
'\nActual type: {0}.'.format(type(array)))
if stream is not None:
with device:
with stream:
if is_numpy:
return cupy.asarray(array)
# Need to make a copy when an array is copied to another device
return cupy.array(array, copy=True)
with device:
if is_numpy:
return cupy.asarray(array)
# Need to make a copy when an array is copied to another device
return cupy.array(array, copy=True)
def to_cpu(array, stream=None):
"""Copies the given GPU array to host CPU.
Args:
array (*array*, None, list or tuple):
Array or arrays to be sent to CPU.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
numpy.ndarray, list or tuple: Array on CPU.
If some of the arrays are already on CPU, then this function just
returns those arrays without performing any copy.
If input arrays include `None`, it is returned as `None` as is.
"""
return _backend._convert_arrays(
array, lambda arr: _array_to_cpu(arr, stream))
def _array_to_cpu(array, stream):
if array is None:
return None
if isinstance(array, ndarray):
check_cuda_available()
with get_device_from_array(array):
return array.get(stream)
return _cpu._array_to_cpu(array)
def copy(array, out=None, out_device=None, stream=None):
"""Copies a :class:`cupy.ndarray` object using the default stream.
This function can copy the device array to the destination array on another
device.
Args:
array (cupy.ndarray): Array to be copied.
out (cupy.ndarray): Destination array.
If it is not ``None``, then ``out_device`` argument is ignored.
out_device: Destination device specifier. Actual device object is
obtained by passing this value to :func:`get_device`.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
cupy.ndarray: Copied array.
If ``out`` is not specified, then the array is allocated on the device
specified by ``out_device`` argument.
"""
# TODO(niboshi): Update docstring not to mention deprecated `get_device`
check_cuda_available()
assert stream is None # TODO(beam2d): FIX IT
if out is None:
if out_device is None:
out_device = array
with chainer.get_device(out_device):
out = cupy.empty_like(array)
with get_device_from_array(array):
cupy.copyto(out, array)
return out
# ------------------------------------------------------------------------------
# Function result memoization
# ------------------------------------------------------------------------------
def memoize(for_each_device=False):
"""Makes a function memoizing the result for each argument and device.
This is a similar version of :func:`cupy.memoize`. The difference is that
this function can be used in the global scope even if CUDA is not
available. In such case, this function does nothing.
.. note::
This decorator acts as a dummy if CUDA is not available. It cannot be
used for general purpose memoization even if ``for_each_device`` is set
to False.
"""
if available:
return cupy.memoize(for_each_device)
def dummy_decorator(f):
@functools.wraps(f)
def ret(*args, **kwargs):
return f(*args, **kwargs)
return ret
return dummy_decorator
def clear_memo():
"""Clears the memoized results for all functions decorated by memoize.
This function works like :func:`cupy.clear_memo` as a counterpart for
:func:`chainer.backends.cuda.memoize`. It can be used even if CUDA is
not available. In such a case, this function does nothing.
"""
if available:
cupy.clear_memo()
# ------------------------------------------------------------------------------
# Kernel definition utility
# ------------------------------------------------------------------------------
@memoize()
def elementwise(in_params, out_params, operation, name, **kwargs):
"""Creates an elementwise kernel function.
This function uses :func:`~chainer.backends.cuda.memoize` to cache the
kernel object, i.e. the resulting kernel object is cached for each argument
combination and CUDA device.
The arguments are the same as those for
:class:`cupy.ElementwiseKernel`, except that the ``name`` argument is
mandatory.
"""
check_cuda_available()
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, **kwargs)
@memoize()
def reduce(in_params, out_params, map_expr, reduce_expr, post_map_expr,
identity, name, **kwargs):
"""Creates a global reduction kernel function.
This function uses :func:`~chainer.backends.cuda.memoize` to cache the
resulting kernel object, i.e. the resulting kernel object is cached for
each argument combination and CUDA device.
The arguments are the same as those for
:class:`cupy.ReductionKernel`, except that the ``name`` argument is
mandatory.
"""
check_cuda_available()
return cupy.ReductionKernel(
in_params, out_params, map_expr, reduce_expr, post_map_expr,
identity, name, **kwargs)
@memoize()
def raw(code, name, *args, **kwargs):
"""Creates a raw kernel function.
This function uses :func:`~chainer.backends.cuda.memoize` to cache the
resulting kernel object, i.e. the resulting kernel object is cached for
each argument combination and CUDA device.
The arguments are the same as those for :class:`cupy.RawKernel`.
"""
check_cuda_available()
return cupy.RawKernel(code, name, *args, **kwargs)
# ------------------------------------------------------------------------------
# numpy/cupy compatible coding
# ------------------------------------------------------------------------------
def get_array_module(*args):
"""Gets an appropriate one from :mod:`numpy` or :mod:`cupy`.
This is almost equivalent to :func:`cupy.get_array_module`. The differences
are that this function can be used even if CUDA is not available and that
it will return their data arrays' array module for
:class:`~chainer.Variable` arguments.
.. deprecated:: v5.0.0
This API is deprecated. Please use
:func:`~chainer.backend.get_array_module` instead.
Args:
args: Values to determine whether NumPy or CuPy should be used.
Returns:
module: :mod:`cupy` or :mod:`numpy` is returned based on the types of
the arguments.
"""
return chainer.backend.get_array_module(*args)
def get_max_workspace_size():
"""Gets the workspace size for cuDNN.
Check "cuDNN Library User Guide" for detail.
Returns:
int: The workspace size for cuDNN.
"""
# To avoid error on no cuDNN environment
if cudnn_enabled:
return cudnn.get_max_workspace_size()
return 0
def set_max_workspace_size(size):
"""Sets the workspace size for cuDNN.
Check "cuDNN Library User Guide" for detail.
Args:
size: The workspace size for cuDNN.
"""
# To avoid error on no cuDNN environment
if cudnn_enabled:
cudnn.set_max_workspace_size(size)
def fuse(*args, **kwargs):
"""Function fusing decorator.
It calls :func:`cupy.fuse` when CuPy is available to make fused function
and does nothing otherwise.
.. seealso::
:func:`cupy.fuse`
"""
if available:
return cupy.fuse(*args, **kwargs)
elif len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return args[0]
else:
return lambda f: f
# ------------------------------------------------------------------------------
# cuDNN
# ------------------------------------------------------------------------------
_SHOULD_USE_CUDNN = {
'==always': {'always': True, 'auto': False, 'never': False},
'>=auto': {'always': True, 'auto': True, 'never': False},
}
_cudnn_version = cuda.cudnn.getVersion() if cudnn_enabled else -1
def should_use_cudnn(level, lowest_version=0):
"""Determines if we should use cuDNN.
This function checks ``chainer.config.use_cudnn``,
``chainer.backends.cuda.cudnn_enabled``, and the cuDNN version. Note that
``cudnn_enabled`` flag is fixed at loading of :mod:`chainer` module.
Args:
level (str): cuDNN use level. It must be either ``'==always'`` or
``'>=auto'``. ``'==always'`` indicates that the ``use_cudnn``
config must be ``'always'`` to use cuDNN.
lowest_version (int): Required lowest cuDNN version. It must be
non-negative.
Returns:
bool: ``True`` if the caller should use cuDNN.
"""
if _cudnn_version < lowest_version:
return False
if level not in _SHOULD_USE_CUDNN:
raise ValueError('invalid cuDNN use level: %s '
'(must be either of "==always" or ">=auto")' %
repr(level))
flags = _SHOULD_USE_CUDNN[level]
use_cudnn = config.use_cudnn
if use_cudnn not in flags:
raise ValueError('invalid use_cudnn configuration: %s '
'(must be either of "always", "auto", or "never")' %
repr(use_cudnn))
return flags[use_cudnn]
_tensor_core_flag = {'always': True, 'auto': None, 'never': False}
def should_use_cudnn_tensor_core(dtype):
"""Determines if Tensor Core should be used.
Args:
dtype (numpy.dtype): data type of input tensor.
Returns:
bool: ``True`` if Tensor Core should be used.
"""
use_cudnn_tensor_core = config.use_cudnn_tensor_core
if use_cudnn_tensor_core not in _tensor_core_flag:
raise ValueError('invalid use_cudnn_tensor_core configuration: %s '
'(must be either of "always", "auto", or "never")' %
repr(use_cudnn_tensor_core))
use_tensor_core = _tensor_core_flag[use_cudnn_tensor_core]
if use_tensor_core is None:
use_tensor_core = cudnn.is_tensor_core_available(dtype)
return use_tensor_core
# ------------------------------------------------------------------------------
# cupy.cudnn utility
# ------------------------------------------------------------------------------
def get_cudnn_dropout_states():
if not cudnn_enabled:
raise RuntimeError('cuDNN is not enabled.')
thread_id = threading.current_thread().ident
return get_cudnn_dropout_states_core(thread_id)
_dropout_states_count = itertools.count()
@memoize(for_each_device=True)
def get_cudnn_dropout_states_core(thread_id):
states_id = next(_dropout_states_count)
seed = os.getenv('CHAINER_SEED')
if seed is None:
try:
seed_str = binascii.hexlify(os.urandom(8))
seed = numpy.uint64(int(seed_str, 16))
except NotImplementedError:
seed = numpy.uint64(time.clock() * 1000000)
else:
seed = numpy.uint64(seed)
seed += numpy.uint64(states_id)
return cudnn.DropoutStates(None, seed)
def _get_cudnn_tensor_layout_x(x_layout):
if x_layout == chainer.memory_layouts.CUDNN_CHANNEL_FIRST_X:
return cuda.cudnn.CUDNN_TENSOR_NCHW
assert x_layout == chainer.memory_layouts.CUDNN_CHANNEL_LAST_X
return cuda.cudnn.CUDNN_TENSOR_NHWC
def _get_cudnn_tensor_layout_w(w_layout):
if w_layout == chainer.memory_layouts.CUDNN_CHANNEL_FIRST_W:
return cuda.cudnn.CUDNN_TENSOR_NCHW
assert w_layout == chainer.memory_layouts.CUDNN_CHANNEL_LAST_W
return cuda.cudnn.CUDNN_TENSOR_NHWC
| 26,329
| 30.915152
| 80
|
py
|
chainer
|
chainer-master/chainer/backends/_chainerx.py
|
import numpy
import chainer
from chainer import _backend
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer.backends import intel64
import chainerx
class ChainerxDevice(_backend.Device):
"""Device for ChainerX backend"""
xp = chainerx
supported_array_types = (chainerx.ndarray,)
__hash__ = _backend.Device.__hash__
def __init__(self, device: 'chainerx.Device') -> None:
assert isinstance(device, chainerx.Device)
super(ChainerxDevice, self).__init__()
self.device = device # type: chainerx.Device
@staticmethod
def from_array(array):
if isinstance(array, chainerx.ndarray) and array.device is not None:
return ChainerxDevice(array.device)
return None
@staticmethod
def from_fallback_device(device):
"""Returns a :class:`~chainer.backend.ChainerxDevice` corresponding \
to the fallback device.
.. seealso::
:data:`~chainer.backend.ChainerxDevice.fallback_device`
"""
assert isinstance(device, _backend.Device)
if isinstance(device, _cpu.CpuDevice):
return ChainerxDevice(chainerx.get_device('native', 0))
if isinstance(device, cuda.GpuDevice):
return ChainerxDevice(
chainerx.get_device('cuda', device.device.id))
raise RuntimeError(
'Only CPU or GPU devices are allowed. '
'Actual: {}'.format(device))
@property
def name(self):
return self.device.name
@property
def fallback_device(self):
"""Fallback device.
A fallback device is either a :class:`~chainer.backend.CpuDevice` or
a :class:`~chainer.backend.GpuDevice` which shares the same physical
device with the original ChainerX device.
For example, the fallback device of ``native:0`` ChainerX device is
:class:`~chainer.backend.CpuDevice`. The fallback device of ``cuda:1``
ChainerX device is :class:`~chainer.backend.GpuDevice` with device ID
1.
"""
backend_name = self.device.backend.name
if backend_name == 'native':
return _cpu.CpuDevice()
if backend_name == 'cuda':
return cuda.GpuDevice.from_device_id(self.device.index)
raise RuntimeError(
'Only \'native\' or \'cuda\' devices have corresponding fallback '
'devices. Actual: {}'.format(backend_name))
def __eq__(self, other):
return (
isinstance(other, ChainerxDevice)
and other.device == self.device)
def __repr__(self):
return '<{} {}>'.format(
self.__class__.__name__, self.device.name)
def create_context(self):
# Returns a context that sets the default device.
return chainerx.using_device(self.device)
def send_array(self, array):
device = self.device
if isinstance(array, chainerx.ndarray):
if array.device is device:
return array
return array.to_device(device)
return _array_to_chainerx(array, device)
def use(self):
chainerx.set_default_device(self.device)
def is_array_supported(self, array):
return (
isinstance(array, chainerx.ndarray)
and self.device == array.device)
def to_chx(array):
"""Converts an array or arrays to ChainerX.
Destination ChainerX devices are chosen according to the types of input
arrays.
"""
return _backend._convert_arrays(array, _array_to_chainerx)
def from_chx(array):
"""Converts an array or arrays from ChainerX to NumPy or CuPy ones.
Destination array types are chosen such that no copies occur.
"""
return _backend._convert_arrays(array, _array_from_chainerx)
def _get_chainerx_device(device_spec):
# Returns chainerx.Device
if isinstance(device_spec, chainerx.Device):
return device_spec
return chainerx.get_device(device_spec)
def _array_to_chainerx(array, device=None):
# If device is None, appropriate device is chosen according to the input
# arrays.
assert device is None or isinstance(device, chainerx.Device)
if array is None:
return None
if array.dtype not in chainerx.all_dtypes:
raise TypeError(
'Dtype {} is not supported in ChainerX.'.format(array.dtype.name))
if isinstance(array, chainerx.ndarray):
if device is None:
return array
if device is array.device:
return array
return array.to_device(device)
if isinstance(array, numpy.ndarray):
if device is None:
device = chainerx.get_device('native', 0)
return chainerx.array(array, device=device, copy=False)
if isinstance(array, cuda.ndarray):
if device is None:
device = chainerx.get_device('cuda', array.device.id)
elif device.backend.name != 'cuda':
# cupy to non-cuda backend
# TODO(niboshi): Remove conversion to numpy when both CuPy and
# ChainerX support the array interface.
array = _cpu._to_cpu(array)
return chainerx.array(array, device=device, copy=False)
elif device.index != array.device.id:
# cupy to cuda backend but different device
array = cuda.to_gpu(array, device=device.index)
# cupy to cuda backend with the same device
return chainerx._core._fromrawpointer(
array.data.mem.ptr,
array.shape,
array.dtype,
array.strides,
device,
array.data.ptr - array.data.mem.ptr,
array)
if isinstance(array, intel64.mdarray):
return _array_to_chainerx(numpy.array(array), device)
if numpy.isscalar(array):
return chainerx.asarray(array)
raise TypeError(
'Array cannot be converted into chainerx.ndarray'
'\nActual type: {0}.'.format(type(array)))
def _array_from_chainerx(array):
if array is None:
return None
if not isinstance(array, chainerx.ndarray):
if isinstance(array, chainer.get_array_types()):
return array
raise TypeError(
'Tried to convert to a non-ChainerX array from an invalid type: '
'{}'.format(type(array)))
backend_name = array.device.backend.name
if backend_name == 'native':
return _cpu._to_cpu(array)
if backend_name == 'cuda':
return cuda.to_gpu(array, array.device.index)
raise ValueError(
'Only ChainerX arrays with native or cuda backends can be converted '
'to non-ChainerX arrays.\nActual: {0}.'.format(backend_name))
| 6,701
| 32.343284
| 78
|
py
|
chainer
|
chainer-master/chainer/backends/__init__.py
|
from chainer.backends import cuda # NOQA
from chainer.backends import intel64 # NOQA
# TODO(niboshi): Refactor registration of backend modules for functions like
# chainer.get_device().
| 190
| 26.285714
| 76
|
py
|
chainer
|
chainer-master/chainer/backends/_cpu.py
|
import numpy
from chainer import _backend
# TODO(kmaehashi): `from chainer.backends import cuda` causes circular imports.
# Surprisingly, `import chianer.backends` works as a workaround to avoid, but
# we should fix circular dependencies themselves around `chainer.backends.*`.
import chainer.backends
import chainerx
class CpuDevice(_backend.Device):
"""Device for CPU (NumPy) backend"""
name = '@numpy'
xp = numpy
supported_array_types = (numpy.ndarray,)
__hash__ = _backend.Device.__hash__
@staticmethod
def from_array(array):
if isinstance(array, numpy.ndarray):
return CpuDevice()
return None
def __eq__(self, other):
return isinstance(other, CpuDevice)
def __repr__(self):
return '<{} (numpy)>'.format(self.__class__.__name__)
def send_array(self, array):
return _array_to_cpu(array)
def is_array_supported(self, array):
return isinstance(array, numpy.ndarray)
def _to_cpu(array):
"""Converts an array or arrays to NumPy."""
return _backend._convert_arrays(array, _array_to_cpu)
def _array_to_cpu(array):
if array is None:
return None
if isinstance(array, numpy.ndarray):
return array
if isinstance(array, chainer.backends.intel64.mdarray):
return numpy.asarray(array)
if isinstance(array, chainerx.ndarray):
return chainerx.to_numpy(array, copy=False)
if isinstance(array, chainer.backends.cuda.ndarray):
with chainer.backends.cuda.get_device_from_array(array):
return array.get()
if numpy.isscalar(array):
return numpy.asarray(array)
raise TypeError(
'Array cannot be converted into an numpy.ndarray'
'\nActual type: {0}.'.format(type(array)))
| 1,783
| 27.774194
| 79
|
py
|
chainer
|
chainer-master/chainer/initializers/uniform.py
|
import numpy
from chainer import backend
from chainer import initializer
from chainer.utils import argument
# Original code forked from MIT licensed keras project
# https://github.com/fchollet/keras/blob/master/keras/initializations.py
class Uniform(initializer.Initializer):
"""Initializes array with a scaled uniform distribution.
Each element of the array is initialized by the value drawn
independently from uniform distribution :math:`[-scale, scale]`.
Attributes:
scale (float): A constant that determines the
scale of the uniform distribution.
dtype: Data type specifier.
rng (xp.random.RandomState): Pseudo-random number generator.
"""
def __init__(self, scale=0.05, dtype=None, **kwargs):
self.scale = scale
rng = None
if kwargs:
rng, = argument.parse_kwargs(kwargs, ('rng', rng))
self.rng = rng
super(Uniform, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype,\
'{} != {}'.format(array.dtype, self.dtype)
if self.rng is None:
device = backend.get_device_from_array(array)
array[...] = device.xp.random.uniform(
low=-self.scale, high=self.scale, size=array.shape)
else:
backend.copyto(array, self.rng.uniform(
low=-self.scale, high=self.scale,
size=array.shape).astype(array.dtype, copy=False))
class LeCunUniform(initializer.Initializer):
"""Initializes array with a scaled uniform distribution.
Each element of the array is initialized by the value drawn
independently from uniform distribution :math:`[-s, s]`
where :math:`s = scale \\times \\sqrt{\\frac{3}{fan_{in}}}`.
Here :math:`fan_{in}` is the number of input units.
Reference: LeCun 98, Efficient Backprop
http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
Attributes:
scale (float): A constant that determines the
scale of the uniform distribution.
dtype: Data type specifier.
rng (xp.random.RandomState): Pseudo-random number generator.
"""
def __init__(self, scale=1.0, dtype=None, **kwargs):
self.scale = scale
rng = None
if kwargs:
rng, = argument.parse_kwargs(kwargs, ('rng', rng))
self.rng = rng
super(LeCunUniform, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype,\
'{} != {}'.format(array.dtype, self.dtype)
fan_in, fan_out = initializer.get_fans(array.shape)
s = self.scale * numpy.sqrt(3. / fan_in)
Uniform(s, rng=self.rng)(array)
class GlorotUniform(initializer.Initializer):
"""Initializes array with a scaled uniform distribution.
Each element of the array is initialized by the value drawn
independently from uniform distribution :math:`[-s, s]`
where :math:`s = scale \\times \\sqrt{\\frac{6}{fan_{in} + fan_{out}}}`.
Here, :math:`fan_{in}` and :math:`fan_{out}` are the number of
input and output units, respectively.
Attributes:
scale (float): A constant that determines the
scale of the uniform distribution.
dtype: Data type specifier.
rng (xp.random.RandomState): Pseudo-random number generator.
"""
def __init__(self, scale=1.0, dtype=None, **kwargs):
self.scale = scale
rng = None
if kwargs:
rng, = argument.parse_kwargs(kwargs, ('rng', rng))
self.rng = rng
super(GlorotUniform, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype,\
'{} != {}'.format(array.dtype, self.dtype)
fan_in, fan_out = initializer.get_fans(array.shape)
s = self.scale * numpy.sqrt(6. / (fan_in + fan_out))
Uniform(s, rng=self.rng)(array)
class HeUniform(initializer.Initializer):
"""Initializes array with scaled uniform distribution.
Each element of the array is initialized by the value drawn
independently from uniform distribution :math:`[-s, s]`
where :math:`s = scale \\times \\sqrt{\\frac{6}{fan_{in}}}`.
Here, :math:`fan_{in}` is the number of input units.
Attributes:
scale (float): A constant that determines the
scale of the uniform distribution.
dtype: Data type specifier.
rng (xp.random.RandomState): Pseudo-random number generator.
"""
def __init__(self, scale=1.0, dtype=None, **kwargs):
self.scale = scale
rng = None
if kwargs:
rng, = argument.parse_kwargs(kwargs, ('rng', rng))
self.rng = rng
super(HeUniform, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype,\
'{} != {}'.format(array.dtype, self.dtype)
fan_in, fan_out = initializer.get_fans(array.shape)
s = self.scale * numpy.sqrt(6. / fan_in)
Uniform(s, rng=self.rng)(array)
| 5,202
| 33.230263
| 76
|
py
|
chainer
|
chainer-master/chainer/initializers/normal.py
|
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import initializer
from chainer.utils import argument
# Original code forked from MIT licensed keras project
# https://github.com/fchollet/keras/blob/master/keras/initializations.py
class Normal(initializer.Initializer):
"""Initializes array with a normal distribution.
Each element of the array is initialized by the value drawn
independently from Gaussian distribution whose mean is 0,
and standard deviation is ``scale``.
Args:
scale (float): Standard deviation of Gaussian distribution.
dtype: Data type specifier.
rng (xp.random.RandomState): Pseudo-random number generator.
"""
def __init__(self, scale=0.05, dtype=None, **kwargs):
self.scale = scale
rng = None
if kwargs:
rng, = argument.parse_kwargs(kwargs, ('rng', rng))
self.rng = rng
super(Normal, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype,\
'{} != {}'.format(array.dtype, self.dtype)
if self.rng is None:
device = backend.get_device_from_array(array)
args = {'loc': 0.0, 'scale': self.scale, 'size': array.shape}
if device.xp is cuda.cupy:
# Only CuPy supports dtype option
if self.dtype == numpy.float32 or self.dtype == numpy.float16:
# float16 is not supported in cuRAND
args['dtype'] = numpy.float32
array[...] = device.xp.random.normal(**args)
else:
backend.copyto(array, self.rng.normal(
loc=0.0, scale=self.scale,
size=array.shape).astype(array.dtype, copy=False))
class LeCunNormal(initializer.Initializer):
"""Initializes array with scaled Gaussian distribution.
Each element of the array is initialized by the value drawn
independently from Gaussian distribution whose mean is 0,
and standard deviation is
:math:`scale \\times \\sqrt{\\frac{1}{fan_{in}}}`,
where :math:`fan_{in}` is the number of input units.
Reference: LeCun 98, Efficient Backprop
http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
Args:
scale (float): A constant that determines the scale
of the standard deviation.
dtype: Data type specifier.
rng (xp.random.RandomState): Pseudo-random number generator.
"""
def __init__(self, scale=1.0, dtype=None, **kwargs):
self.scale = scale
rng = None
if kwargs:
rng, = argument.parse_kwargs(kwargs, ('rng', rng))
self.rng = rng
super(LeCunNormal, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype,\
'{} != {}'.format(array.dtype, self.dtype)
fan_in, fan_out = initializer.get_fans(array.shape)
s = self.scale * numpy.sqrt(1. / fan_in)
Normal(s, rng=self.rng)(array)
class GlorotNormal(initializer.Initializer):
"""Initializes array with scaled Gaussian distribution.
Each element of the array is initialized by the value drawn
independently from Gaussian distribution whose mean is 0,
and standard deviation is
:math:`scale \\times \\sqrt{\\frac{2}{fan_{in} + fan_{out}}}`,
where :math:`fan_{in}` and :math:`fan_{out}` are the number of
input and output units, respectively.
Reference: Glorot & Bengio, AISTATS 2010
Args:
scale (float): A constant that determines the scale
of the standard deviation.
dtype: Data type specifier.
rng (xp.random.RandomState): Pseudo-random number generator.
"""
def __init__(self, scale=1.0, dtype=None, **kwargs):
self.scale = scale
rng = None
if kwargs:
rng, = argument.parse_kwargs(kwargs, ('rng', rng))
self.rng = rng
super(GlorotNormal, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype,\
'{} != {}'.format(array.dtype, self.dtype)
fan_in, fan_out = initializer.get_fans(array.shape)
s = self.scale * numpy.sqrt(2. / (fan_in + fan_out))
Normal(s, rng=self.rng)(array)
class HeNormal(initializer.Initializer):
"""Initializes array with scaled Gaussian distribution.
Each element of the array is initialized by the value drawn
independently from Gaussian distribution whose mean is 0,
and standard deviation is
:math:`scale \\times \\sqrt{\\frac{2}{fan}}`.
If ``fan_option == 'fan_in'``, :math:`fan` is the
number of input units.
If ``fan_option == 'fan_out'``, :math:`fan` is the
number of output units.
Reference: He et al., https://arxiv.org/abs/1502.01852
Args:
scale (float): A constant that determines the scale
of the standard deviation.
dtype: Data type specifier.
fan_option ({'fan_in', 'fan_out'}): Decides how to compute the
standard deviation. The default value is ``'fan_in'``.
rng (xp.random.RandomState): Pseudo-random number generator.
"""
def __init__(self, scale=1.0, dtype=None, fan_option='fan_in', **kwargs):
self.scale = scale
self.fan_option = fan_option
rng = None
if kwargs:
rng, = argument.parse_kwargs(kwargs, ('rng', rng))
self.rng = rng
super(HeNormal, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype,\
'{} != {}'.format(array.dtype, self.dtype)
fan_in, fan_out = initializer.get_fans(array.shape)
if self.fan_option == 'fan_in':
s = self.scale * numpy.sqrt(2. / fan_in)
elif self.fan_option == 'fan_out':
s = self.scale * numpy.sqrt(2. / fan_out)
else:
raise ValueError(
'fan_option should be either \'fan_in\' or \'fan_out\'.')
Normal(s, rng=self.rng)(array)
| 6,197
| 34.016949
| 78
|
py
|
chainer
|
chainer-master/chainer/initializers/constant.py
|
import numpy
import chainer
from chainer import backend
from chainer import initializer
from chainer import types # NOQA
class Identity(initializer.Initializer):
"""Initializes array with the identity matrix.
It initializes the given array with the constant
multiple of the identity matrix.
Note that arrays to be passed must be 2D squared matrices.
Attributes:
scale (scalar): A constant to be multiplied to identity matrices.
"""
def __init__(self, scale=1.0, dtype=None):
self.scale = scale
super(Identity, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype
shape = array.shape
if len(shape) != 2 or shape[0] != shape[1]:
raise ValueError('Identity matrix initialization can only be used '
'for 2D squared matrices.')
device = backend.get_device_from_array(array)
array[...] = device.xp.identity(shape[0]) * self.scale
class _Constant(initializer.Initializer):
fill_value = None # type: types.ScalarValue
def __init__(self, dtype=None):
if not (isinstance(self.fill_value, chainer.get_array_types())
or numpy.isscalar(self.fill_value)):
raise ValueError(
'fill_value must be either scalar, numpy.ndarray, '
'cupy.ndarray or chainerx.ndarray.')
super(_Constant, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype
# Calling copy to ensures that the fill_value array
# is moved to the device where array resides
if isinstance(self.fill_value, chainer.get_array_types()):
backend.copyto(
array, self.fill_value.astype(array.dtype, copy=False))
else:
device = backend.get_device_from_array(array)
array[...] = device.xp.asarray(self.fill_value)
class Constant(_Constant):
"""Initializes array with constant value.
Attributes:
~Constant.fill_value (scalar or :ref:`ndarray`):
A constant to be assigned to the initialized array.
Broadcast is allowed on this assignment.
dtype: Data type specifier.
"""
def __init__(self, fill_value, dtype=None):
self.fill_value = fill_value
super(Constant, self).__init__(dtype)
class Zero(_Constant):
"""Initializes array to all-zero.
Attributes:
~Zero.dtype: Data type specifier.
"""
fill_value = 0.0
class One(_Constant):
"""Initializes array to all-one.
Attributes:
~One.dtype: Data type specifier.
"""
fill_value = 1.0
class NaN(_Constant):
"""Initializes array to all-NaN.
Attributes:
~NaN.dtype: Data type specifier.
"""
fill_value = numpy.nan
| 2,918
| 25.779817
| 79
|
py
|
chainer
|
chainer-master/chainer/initializers/sampling.py
|
import numpy
from chainer.backends import cuda
from chainer import initializer
# Original code from Berkeley FCN
# https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py
def _get_linear_filter(size, ndim, upsampling=True):
"""Make a 2D and 3D linear kernel suitable for up/downsampling"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1.
else:
center = factor - 0.5
slices = (slice(size),) * ndim
og = numpy.ogrid[slices]
filt = 1.
for og_i in og:
filt = filt * (1. - abs(og_i - center) / factor)
if not upsampling:
filt /= filt.sum()
return filt
class _SamplingFilter(initializer.Initializer):
def __init__(self, upsampling=True, interpolation='linear', dtype=None):
self._upsampling = upsampling
if interpolation == 'linear':
self._get_filter_func = _get_linear_filter
else:
raise ValueError(
'Unsupported interpolation method: {}'.format(interpolation))
super(_SamplingFilter, self).__init__(dtype)
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype
xp = cuda.get_array_module(array)
in_c, out_c = array.shape[:2]
assert in_c == out_c or out_c == 1
ksize = None
for k in array.shape[2:]:
if ksize is None:
ksize = k
else:
if ksize != k:
raise ValueError(
'ksize must be all same: {} != {}'.format(ksize, k))
filt = self._get_filter_func(
ksize, ndim=array.ndim - 2, upsampling=self._upsampling)
filt = xp.asarray(filt)
array[...] = 0.
if out_c == 1:
array[xp.arange(in_c), 0, ...] = filt
else:
array[xp.arange(in_c), xp.arange(out_c), ...] = filt
class UpsamplingDeconvFilter(_SamplingFilter):
"""Initializes array with upsampling filter.
The array is initialized with a standard image upsampling weight.
This initializer is often used as initial weight for
:func:`~chainer.links.DeconvolutionND`.
:func:`~chainer.links.DeconvolutionND` is expected that its `stride` is
equal to `(ksize + 1) // 2`.
Reference: Long et al., https://arxiv.org/abs/1411.4038
Attributes:
interpolation (str): Upsampling interpolation method.
Default is 'linear'.
"""
def __init__(self, interpolation='linear', dtype=None):
if interpolation != 'linear':
raise ValueError(
'Unsupported interpolation method: {}'.format(interpolation))
super(UpsamplingDeconvFilter, self).__init__(
upsampling=True, interpolation=interpolation, dtype=dtype)
class DownsamplingConvFilter(_SamplingFilter):
"""Initializes array with downsampling filter.
The array is initialized with a standard image downsampling weight.
This initializer is often used as initial weight for
:func:`~chainer.links.ConvolutionND`.
:func:`~chainer.links.ConvolutionND` is expected that its `stride` is
equal to `(ksize + 1) // 2`.
Reference: Long et al., https://arxiv.org/abs/1411.4038
Attributes:
interpolation (str): Downsampling interpolation method.
Default is 'linear'.
"""
def __init__(self, interpolation='linear', dtype=None):
if interpolation != 'linear':
raise ValueError(
'Unsupported interpolation method: {}'.format(interpolation))
super(DownsamplingConvFilter, self).__init__(
upsampling=False, interpolation=interpolation, dtype=dtype)
| 3,704
| 30.939655
| 77
|
py
|
chainer
|
chainer-master/chainer/initializers/orthogonal.py
|
import numpy
from chainer import backend
from chainer import initializer
from chainer import utils
from chainer.utils import argument
_orthogonal_constraints = { # (assert emb., assert proj.)
'auto': (False, False),
'projection': (False, True),
'embedding': (True, False),
'basis': (True, True),
}
# Original code forked from MIT licensed keras project
# https://github.com/fchollet/keras/blob/master/keras/initializations.py
class Orthogonal(initializer.Initializer):
"""Initializes array with an orthogonal system.
This initializer first makes a matrix of the same shape as the
array to be initialized whose elements are drawn independently from
standard Gaussian distribution.
Next, it applies QR decomposition to (the transpose of) the matrix.
To make the decomposition (almost surely) unique, we require the diagonal
of the triangular matrix R to be non-negative (see e.g. Edelman & Rao,
https://web.eecs.umich.edu/~rajnrao/Acta05rmt.pdf).
Then, it initializes the array with the (semi-)orthogonal matrix Q.
Finally, the array is multiplied by the constant ``scale``.
If the ``ndim`` of the input array is more than 2, we consider the array
to be a matrix by concatenating all axes except the first one.
The number of vectors consisting of the orthogonal system
(i.e. first element of the shape of the array) must be equal to or smaller
than the dimension of each vector (i.e. second element of the shape of
the array).
Attributes:
scale (float): A constant to be multiplied by.
dtype: Data type specifier.
mode (str): Assertion on the initialized shape.
``'auto'`` (default), ``'projection'`` (before v7),
``'embedding'``, or ``'basis'``.
rng (xp.random.RandomState): Pseudo-random number generator.
Reference: Saxe et al., https://arxiv.org/abs/1312.6120
"""
def __init__(self, scale=1.1, dtype=None, mode='auto', **kwargs):
self.scale = scale
self.mode = mode
rng = None
if kwargs:
rng, = argument.parse_kwargs(kwargs, ('rng', rng))
self.rng = rng
try:
self._checks = _orthogonal_constraints[mode]
except KeyError:
raise ValueError(
'Invalid mode: {}. Choose from {}.'.format(
repr(mode),
', '.join(repr(m) for m in _orthogonal_constraints)))
super(Orthogonal, self).__init__(dtype)
# TODO(Kenta Oono)
# How do we treat overcomplete base-system case?
def __call__(self, array):
if self.dtype is not None:
assert array.dtype == self.dtype,\
'{} != {}'.format(array.dtype, self.dtype)
if not array.shape: # 0-dim case
if self.rng is None:
a = numpy.random.randint(2)
else:
a = self.rng.randint(2)
a = int(a)
array[...] = self.scale * (2 * a - 1)
elif not array.size:
raise ValueError('Array to be initialized must be non-empty.')
else:
# numpy.prod returns float value when the argument is empty.
out_dim = len(array)
in_dim = utils.size_of_shape(array.shape[1:])
if (in_dim > out_dim and self._checks[0]) or (
in_dim < out_dim and self._checks[1]):
raise ValueError(
'Cannot make orthogonal {}. '
'shape = {}, interpreted as '
'{}-dim input and {}-dim output.'.format(
self.mode, array.shape, in_dim, out_dim))
transpose = in_dim > out_dim
if self.rng is None:
a = numpy.random.normal(size=(out_dim, in_dim))
else:
a_tmp = self.rng.normal(size=(out_dim, in_dim))
a = numpy.empty(a_tmp.shape, dtype=a_tmp.dtype)
backend.copyto(a, a_tmp)
if transpose:
a = a.T
# cupy.linalg.qr requires cusolver in CUDA 8+
q, r = numpy.linalg.qr(a)
q *= numpy.copysign(self.scale, numpy.diag(r))
if transpose:
q = q.T
backend.copyto(array, q.reshape(array.shape).astype(
array.dtype, copy=False))
| 4,361
| 38.297297
| 78
|
py
|
chainer
|
chainer-master/chainer/initializers/__init__.py
|
import typing as tp # NOQA
import numpy
import chainer
from chainer import backend
from chainer.backends import _chainerx # NOQA
# import class and function
from chainer.initializers.constant import Constant
from chainer.initializers.constant import Identity # NOQA
from chainer.initializers.constant import NaN # NOQA
from chainer.initializers.constant import One # NOQA
from chainer.initializers.constant import Zero # NOQA
from chainer.initializers.normal import GlorotNormal # NOQA
from chainer.initializers.normal import HeNormal # NOQA
from chainer.initializers.normal import LeCunNormal
from chainer.initializers.normal import Normal # NOQA
from chainer.initializers.orthogonal import Orthogonal # NOQA
from chainer.initializers.sampling import DownsamplingConvFilter # NOQA
from chainer.initializers.sampling import UpsamplingDeconvFilter # NOQA
from chainer.initializers.uniform import GlorotUniform # NOQA
from chainer.initializers.uniform import HeUniform # NOQA
from chainer.initializers.uniform import LeCunUniform # NOQA
from chainer.initializers.uniform import Uniform # NOQA
from chainer import types # NOQA
def generate_array(
initializer: types.AbstractInitializer,
shape: types.ShapeSpec,
xp: types.Xp,
dtype: tp.Optional[types.DTypeSpec] = None,
device: tp.Optional[types.DeviceSpec] = None
) -> types.NdArray:
"""Return initialized array.
The algorithms used to make the new values depend on the
concrete derived classes. If the initializer has the ``dtype`` attribute,
it is used to construct the array. Otherwise, ``chainer.config.dtype`` is
used instead. See :ref:`configuration` for the dtype config.
Args:
initializer: A callable object that takes :ref:`ndarray` and edits its
value.
shape (int or tuple of int): Shape of the initialized array.
xp (module): :mod:`cupy`, :mod:`numpy`, or :mod:`chainerx`.
dtype: Dtype specifier. If omitted, ``initializer.dtype`` is used.
device: Target device specifier. If omitted, the current device is
used for :mod:`cupy`, and the default device is used for
:mod:`chainerx`.
Returns:
:ref:`ndarray`: An initialized array.
"""
dtype_attr = getattr(initializer, 'dtype', None)
if dtype is not None and dtype_attr is not None \
and numpy.dtype(dtype) != numpy.dtype(dtype_attr):
raise ValueError(
'dtype mismatch: {} != {}'.format(dtype, dtype_attr))
if dtype is None:
dtype = dtype_attr
dtype = chainer.get_dtype(dtype)
if device is None:
backend_device = backend._guess_device_from_array_module(xp)
else:
backend_device = chainer.get_device(device)
if xp != backend_device.xp:
raise ValueError('xp and device arguments are inconsistent.')
with chainer.using_device(backend_device):
array = xp.empty(shape, dtype=dtype)
initializer(array)
return array
def _get_initializer(
initializer: tp.Optional[types.InitializerSpec]
) -> types.AbstractInitializer:
if initializer is None:
return LeCunNormal()
if (isinstance(initializer, chainer.get_array_types())
or numpy.isscalar(initializer)):
return Constant(initializer)
if not callable(initializer):
raise TypeError('invalid type of initializer: %s' % type(initializer))
return initializer
def _check_is_initializer_like(initializer):
if not (initializer is None
or isinstance(initializer, chainer.Initializer)
or callable(initializer)
or isinstance(initializer, chainer.get_array_types())
or numpy.isscalar(initializer)):
raise TypeError(
'Initializer is of wrong type: {}. Allowed types are Initializer, '
'ndarray and scalar.'.format(type(initializer)))
| 3,923
| 37.851485
| 79
|
py
|
chainer
|
chainer-master/chainer/link_hooks/spectral_normalization.py
|
import chainer
from chainer import backend
from chainer import configuration
import chainer.functions as F
from chainer import link_hook
import chainer.links as L
from chainer import variable
import chainerx
def l2normalize(xp, v, eps):
"""Normalize a vector by its L2 norm.
Args:
xp (numpy or cupy):
v (numpy.ndarray or cupy.ndarray)
eps (float): Epsilon value for numerical stability.
Returns:
:class:`numpy.ndarray` or :class:`cupy.ndarray`
"""
# TODO(crcrpar): Remove this when chainerx.linalg.norm becomes available.
if xp is chainerx:
# NOTE(crcrpar): `chainerx.power` is not available as of 2019/03/27.
# See https://github.com/chainer/chainer/pull/6522
norm = chainerx.sqrt(chainerx.sum(v * v))
else:
norm = xp.linalg.norm(v)
return v / (norm + eps)
def update_approximate_vectors(
weight_matrix, u, n_power_iteration, eps):
"""Update the first left and right singular vectors.
This function updates the first left singular vector `u` and
the first right singular vector `v`.
Args:
weight_matrix (~chainer.Variable): 2D weight.
u (numpy.ndarray, cupy.ndarray, or None):
Vector that approximates the first left singular vector and
has the shape of (out_size,).
n_power_iteration (int): Number of iterations to approximate
the first right and left singular vectors.
Returns:
:class:`numpy.ndarray` or `cupy.ndarray`:
Approximate first left singular vector.
:class:`numpy.ndarray` or `cupy.ndarray`:
Approximate first right singular vector.
"""
weight_matrix = weight_matrix.array
xp = backend.get_array_module(weight_matrix)
for _ in range(n_power_iteration):
v = l2normalize(xp, xp.dot(u, weight_matrix), eps)
u = l2normalize(xp, xp.dot(weight_matrix, v), eps)
return u, v
def calculate_max_singular_value(weight_matrix, u, v):
"""Calculate max singular value by power iteration method.
Args:
weight_matrix (~chainer.Variable)
u (numpy.ndarray or cupy.ndarray)
v (numpy.ndarray or cupy.ndarray)
Returns:
~chainer.Variable: Max singular value via power iteration method.
"""
sigma = F.matmul(F.matmul(u, weight_matrix), v)
return sigma
class SpectralNormalization(link_hook.LinkHook):
"""Spectral Normalization link hook implementation.
This hook normalizes a weight using max singular value and this value
is computed via power iteration method. Currently, this hook is supposed to
be added to :class:`chainer.links.Linear`, :class:`chainer.links.EmbedID`,
:class:`chainer.links.Convolution2D`, :class:`chainer.links.ConvolutionND`,
:class:`chainer.links.Deconvolution2D`,
and :class:`chainer.links.DeconvolutionND`. However, you can use this to
other links like RNNs by specifying ``weight_name``.
It is highly recommended to add this hook before optimizer setup because
this hook add a scaling parameter ``gamma`` if ``use_gamma`` is True.
Otherwise, the registered ``gamma`` will not be updated.
.. math::
\\bar{\\mathbf{W}} &=& \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\
\\text{, where} \\ \\sigma(\\mathbf{W}) &:=&
\\max_{\\mathbf{h}: \\mathbf{h} \\ne 0}
\\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2}
= \\max_{\\|\\mathbf{h}\\|_2 \\le 1} \\|\\mathbf{W}\\mathbf{h}\\|_2
See: T. Miyato et. al., `Spectral Normalization for Generative Adversarial
Networks <https://arxiv.org/abs/1802.05957>`_
Args:
n_power_iteration (int): Number of power iteration.
The default value is 1.
eps (float): Numerical stability in norm calculation.
The default value is 1e-6 for the compatibility with
mixed precision training. The value used in the author's
implementation is 1e-12.
use_gamma (bool): If ``True``, weight scaling parameter gamma which is
initialized by initial weight's max singular value is introduced.
factor (float, None): Scaling parameter to divide maximum singular
value. The default value is 1.0.
weight_name (str): Link's weight name to apply this hook. The default
value is ``'W'``.
name (str or None): Name of this hook. The default value is
``'SpectralNormalization'``.
Attributes:
vector_name (str): Name of the approximate first left singular vector
registered in the target link.
the target link.
axis (int): Axis of weight represents the number of output
feature maps or output units (``out_channels`` and
``out_size``, respectively).
.. admonition:: Example
There are almost the same but 2 ways to apply spectral normalization
(SN) hook to links.
1. Initialize link and SN separately. This makes it easy to handle
buffer and parameter of links registered by SN hook.
>>> l = L.Convolution2D(3, 5, 3)
>>> hook = chainer.link_hooks.SpectralNormalization()
>>> _ = l.add_hook(hook)
>>> # Check the shape of the first left singular vector.
>>> getattr(l, hook.vector_name).shape
(5,)
>>> # Delete SN hook from this link.
>>> l.delete_hook(hook.name)
2. Initialize both link and SN hook at one time. This makes it easy to
define your original :class:`~chainer.Chain`.
>>> # SN hook handles lazy initialization!
>>> layer = L.Convolution2D(
... 5, 3, stride=1, pad=1).add_hook(
... chainer.link_hooks.SpectralNormalization())
"""
name = 'SpectralNormalization'
def __init__(self, n_power_iteration=1, eps=1e-6, use_gamma=False,
factor=None, weight_name='W', name=None):
assert n_power_iteration > 0
self.n_power_iteration = n_power_iteration
self.eps = eps
self.use_gamma = use_gamma
self.factor = factor
self.weight_name = weight_name
self.vector_name = weight_name + '_u'
self._initialized = False
self.axis = 0
if name is not None:
self.name = name
def __enter__(self):
raise NotImplementedError(
'This hook is not supposed to be used as context manager.')
def __exit__(self):
raise NotImplementedError
def added(self, link):
# Define axis and register ``u`` if the weight is initialized.
if not hasattr(link, self.weight_name):
raise ValueError(
'Weight \'{}\' does not exist!'.format(self.weight_name))
if isinstance(link, (L.Deconvolution2D, L.DeconvolutionND)):
self.axis = 1
if getattr(link, self.weight_name).array is not None:
self._prepare_parameters(link)
def deleted(self, link):
# Remove approximate vector ``u`` and parameter ``gamma` if exists.
delattr(link, self.vector_name)
if self.use_gamma:
del link.gamma
def forward_preprocess(self, cb_args):
# This method normalizes target link's weight spectrally
# using power iteration method
link = cb_args.link
input_variable = cb_args.args[0]
if not self._initialized:
self._prepare_parameters(link, input_variable)
weight = getattr(link, self.weight_name)
# For link.W or equivalents to be chainer.Parameter
# consistently to users, this hook maintains a reference to
# the unnormalized weight.
self.original_weight = weight
# note: `normalized_weight` is ~chainer.Variable
normalized_weight = self.normalize_weight(link)
setattr(link, self.weight_name, normalized_weight)
def forward_postprocess(self, cb_args):
# Here, the computational graph is already created,
# we can reset link.W or equivalents to be Parameter.
link = cb_args.link
setattr(link, self.weight_name, self.original_weight)
def _prepare_parameters(self, link, input_variable=None):
"""Prepare one buffer and one parameter.
Args:
link (:class:`~chainer.Link`): Link to normalize spectrally.
input_variable (:class:`~chainer.Variable`):
The first minibatch to initialize weight.
"""
if getattr(link, self.weight_name).array is None:
if input_variable is not None:
link._initialize_params(input_variable.shape[1])
initialW = getattr(link, self.weight_name)
if initialW.shape[self.axis] == 0:
raise ValueError(
'Expect {}.shape[{}] > 0'.format(self.weight_name, self.axis)
)
u = link.xp.random.normal(
size=(initialW.shape[self.axis],)).astype(dtype=initialW.dtype)
setattr(link, self.vector_name, u)
link.register_persistent(self.vector_name)
if self.use_gamma:
# Initialize the scaling parameter with the max singular value.
weight_matrix = self.reshape_W(initialW.array)
# TODO(crcrpar): Remove this when chainerx supports SVD.
device = link.device
if device.xp is chainerx:
fallback_device = device.fallback_device
weight_matrix_ = fallback_device.send(weight_matrix)
with chainer.using_device(fallback_device):
_, s_, _ = fallback_device.xp.linalg.svd(weight_matrix_)
s = device.send(s_)
else:
_, s, _ = link.xp.linalg.svd(weight_matrix)
s0 = chainer.utils.force_array(s[0])
with link.init_scope():
link.gamma = variable.Parameter(s0)
self._initialized = True
def normalize_weight(self, link):
"""Normalize target weight before every single forward computation."""
weight_name, vector_name = self.weight_name, self.vector_name
W = getattr(link, weight_name)
u = getattr(link, vector_name)
weight_matrix = self.reshape_W(W)
if not configuration.config.in_recomputing:
with chainer.using_device(link.device):
u, v = update_approximate_vectors(
weight_matrix, u, self.n_power_iteration, self.eps)
else:
v = self.v
sigma = calculate_max_singular_value(weight_matrix, u, v)
if self.factor is not None:
sigma /= self.factor
if self.use_gamma:
W = link.gamma * W / sigma
else:
W = W / sigma
if not configuration.config.in_recomputing:
self.v = v
with chainer.using_device(link.device):
if configuration.config.train:
if link.xp is chainerx:
# TODO(crcrpar): Remove this when
# chainerx supports `copyto`.
getattr(link, vector_name)[:] = u
else:
backend.copyto(getattr(link, vector_name), u)
return W
def reshape_W(self, W):
"""Reshape & transpose weight into 2D if necessary."""
if self.axis != 0:
axes = [self.axis] + [i for i in range(W.ndim) if i != self.axis]
W = W.transpose(axes)
if W.ndim == 2:
return W
return W.reshape(W.shape[0], -1)
| 11,603
| 38.604096
| 79
|
py
|
chainer
|
chainer-master/chainer/link_hooks/timer.py
|
import collections
import os
import sys
import time
import numpy
from chainer.backends import cuda
from chainer import link_hook
# Select the best-resolution timer function
try:
_get_time = time.perf_counter
except AttributeError:
if os.name == 'nt':
_get_time = time.clock
else:
_get_time = time.time
class TimerHook(link_hook.LinkHook):
"""Link hook for measuring elapsed time of \
:meth:`Link.forward() <chainer.Link.forward>`.
Example:
Code example::
from chainer.link_hooks import TimerHook
hook = TimerHook()
with hook:
trainer.run()
hook.print_report()
Output example::
LinkName ElapsedTime Occurrence
Linear 41.42sec 2100
MLP 42.09sec 700
Classifier 42.39sec 700
where *LinkName* is the name of link that calls the hook,
and *ElapsedTime* is the elapsed time the link consumed,
and *Occurrence* is the number of calls.
Warning:
Call graph of links are hierarchical. That means reported elapsed times
may be overlapping with each other and the sum may exceed the total
time.
Attributes:
call_history: List of measurement results. It consists of pairs of
the name of the link that calls this hook and the elapsed time
the :meth:`forward` method of link consumes.
"""
name = 'TimerHook'
table = {'sec': 1, 'ms': 10 ** 3, 'us': 10 ** 6, 'ns': 10 ** 9}
def __init__(self):
self.call_history = []
self._running_stack = []
self._depth = 0
self._total_time = 0
def _preprocess(self):
if self.xp is numpy:
start = _get_time()
self._running_stack.append(start)
else:
assert self.xp is cuda.cupy
start = cuda.Event()
stop = cuda.Event()
start.record()
self._running_stack.append((start, stop))
self._depth += 1
def forward_preprocess(self, args):
self.xp = args.link.xp
self._preprocess()
def _postprocess(self, link):
if self.xp is numpy:
start = self._running_stack.pop()
stop = _get_time()
elapsed_time = stop - start
else:
assert self.xp is cuda.cupy
start, stop = self._running_stack.pop()
stop.record()
stop.synchronize()
# Note that `get_elapsed_time` returns result in milliseconds
elapsed_time = cuda.cupy.cuda.get_elapsed_time(
start, stop) / 1000
self.call_history.append((link.__class__.__name__, elapsed_time))
assert self._depth > 0
self._depth -= 1
if self._depth == 0:
self._total_time += elapsed_time
def forward_postprocess(self, args):
link = args.link
assert link.xp == self.xp
self._postprocess(link)
def total_time(self):
"""Returns total elapsed time in seconds."""
return self._total_time
def summary(self):
"""Returns a summary of time profiling in links.
Returns:
A summarized dictionary whose keys are link names and
values are dictionaries of `elapsed_time` and `occurrence`.
"""
summary = collections.OrderedDict()
for link_name, elapsed_time in self.call_history:
if link_name not in summary:
summary[link_name] = {'elapsed_time': 0, 'occurrence': 0}
record = summary[link_name]
record['elapsed_time'] += elapsed_time
record['occurrence'] += 1
return summary
def _choose_unit(self, second):
"""Choose optimal unit."""
factor = 1
for unit in ['sec', 'ms', 'us']:
if second * factor >= 1:
return factor, unit
factor *= 1000.0
return factor, 'ns'
def print_report(self, unit='auto', file=sys.stdout):
"""Prints a summary report of time profiling in links.
Args:
unit (str): Supplementary units used for computational times.
`sec`, `ms`, `us`, `ns`, `auto`(default) and `auto_foreach`
are supported. If `auto`, units of times are aligned to the
largest, and if `auto_foreach`, units of times are adjusted for
each element.
"""
entries = [['LinkName', 'ElapsedTime', 'Occurrence']]
auto_foreach = (unit == 'auto_foreach')
if unit == 'auto':
max_time = max(
record['elapsed_time'] for record in self.summary().values())
factor, unit = self._choose_unit(max_time)
elif not auto_foreach:
factor = self.table[unit]
for link_name, record in self.summary().items():
second = record['elapsed_time']
if auto_foreach:
factor, unit = self._choose_unit(second)
elapsed_time = '%3.2f%s' % (second * factor, unit)
occurrence = str(record['occurrence'])
entries.append([link_name, elapsed_time, occurrence])
entry_widths = []
entry_widths.append(max(len(f) for f, _, _ in entries))
entry_widths.append(max(len(e) for _, e, _ in entries))
entry_widths.append(max(len(o) for _, _, o in entries))
template = ' '.join('{:>%d}' % w for w in entry_widths)
for link_name, elapsed_time, occurrence in entries:
line = template.format(link_name, elapsed_time, occurrence)
file.write(line)
file.write('\n')
file.flush()
# TODO(crcrpar): Support backward pre/post process.
# See https://github.com/chainer/chainer/issues/5197
| 5,855
| 32.849711
| 79
|
py
|
chainer
|
chainer-master/chainer/link_hooks/__init__.py
|
from chainer.link_hooks.spectral_normalization import SpectralNormalization # NOQA
from chainer.link_hooks.timer import TimerHook # NOQA
from chainer.link_hooks.weight_standardization import WeightStandardization # NOQA
| 223
| 55
| 83
|
py
|
chainer
|
chainer-master/chainer/link_hooks/weight_standardization.py
|
import chainer
from chainer.functions.normalization import group_normalization
from chainer import link_hook
class WeightStandardization(link_hook.LinkHook):
"""Weight Standardization (WS) link hook implementation.
This hook standardizes a weight by *weight statistics*.
This link hook implements a WS which computes the mean and variance along
axis "output channels", then normalizes by these statistics.
WS improves training by reducing the Lipschitz constants of the loss and
the gradients like batch normalization (BN) but without relying on large
batch sizes during training. Specifically, the performance of WS with group
normalization (GN) trained with small-batch is able to match or outperforms
that of BN trained with large-batch.
WS is originally proposed for 2D convolution layers followed by mainly GN
and sometimes BN.
Note that this hook is able to handle layers such as N-dimensional
convolutional, linear and embedding layers but there is no guarantee that
this hook helps training.
See: Siyuan Qiao et. al., `Weight Standardization
<https://arxiv.org/abs/1903.10520>`_
Args:
eps (float): Numerical stability in standard deviation calculation.
The default value is 1e-5.
weight_name (str): Link's weight name to appky this hook. The default
value is ``'W'``.
name (str or None): Name of this hook. The default value is
``'WeightStandardization'``.
"""
name = 'WeightStandardization'
def __init__(self, *, eps=1e-5, weight_name='W', name=None):
self.eps = eps
self.weight_name = weight_name
self._initialized = False
if name is not None:
self.name = name
def __enter__(self):
raise NotImplementedError(
'This hook is not supposed to be used as context manager.')
def __exit__(self):
raise NotImplementedError
def added(self, link):
if not hasattr(link, self.weight_name):
raise ValueError(
'Weight \'{}\' does not exist!'.format(self.weight_name))
if getattr(link, self.weight_name).array is not None:
self._initialized = True
def forward_preprocess(self, cb_args):
# This method normalizes target link's weight by statistics
link = cb_args.link
input_variable = cb_args.args[0]
if not self._initialized:
if getattr(link, self.weight_name).array is None:
if input_variable is None:
raise ValueError('Input variable does not exist!')
link._initialize_params(input_variable.shape[1])
weight = getattr(link, self.weight_name)
with chainer.using_device(link.device):
gamma = link.xp.ones(
(weight.shape[1],), dtype=weight.dtype)
beta = link.xp.zeros(
(weight.shape[1],), dtype=weight.dtype)
# For link.W or equivalents to be chainer.Parameter
# consistently to users, this hook maintains a reference to
# the unnormalized weight.
self.original_weight = weight
# note: `normalized_weight` is ~chainer.Variable
normalized_weight = group_normalization.group_normalization(
weight, groups=1, gamma=gamma, beta=beta, eps=self.eps)
setattr(link, self.weight_name, normalized_weight)
def forward_postprocess(self, cb_args):
# Here, the computational graph is already created,
# we can reset link.W or equivalents to be Parameter.
link = cb_args.link
setattr(link, self.weight_name, self.original_weight)
| 3,678
| 40.806818
| 79
|
py
|
chainer
|
chainer-master/chainer/graph_optimizations/static_graph_utilities.py
|
import inspect
import chainer
def static_code(*dec_args, **dec_kwargs):
"""Decorator to mark a function for inclusion in the static schedule.
This decorator is used to mark a function or method to be included
in a static schedule. There are multiple types of static schedules, such
as "forward pass schedule", "backward pass schedule", "double backward
pass schedule" etc.. The type of schedule that the decorated function's
code is added to will depend on the context in which this decorator
is used. For example, the decorated code will be added to the
"forward pass schedule" if it is called while executing the define-by-
run code of a static subgraph. To inform the framework that a particular
portion of define-by-run code corresponds to a static subgraph, the
code should be placed inside the `__call__()` method of a chain and
then apply the `@static_graph` decorator to the `__call__()` method.
We will refer to such a chain as a "static chain."
This will cause any functions
decorated with `static_code` that are called while inside of `__call__()`
to be included in the forward pass static
schedule in the same order in which they were executed in the
define-by-run code.
Likewise, for any `FunctionNode` instances that are called inside
a static chain, any code that is run while inside the `backward()`
method that calls a function using this decorator will be added to
the corresponding "backward pass schedule."
Usage:
This decorator should be applied to any code called from a static chain
that needs to run each
iteration. This should only include the code that performs
the actual forward and/or backward computations and not include code
for initializing parameters, checking types, etc..
As long as a chain is marked as static, the framework
will automatically wrap any `FunctionNode` instances so that the
code inside their `forward()` and `backward()` methods is added to
the corresponding forward and backward static schedules, respectively.
As a result, any built-in Chainer function and
link calls will be automatically included in the static schedule.
However, there are two cases where the user will need to use this
decorator:
1. Code with side effects that is called from a static chain's define-by-
run code must be placed in a function decorated with `@static_code`.
2. Any user-defined links that contain code other chain Chainer
function calls that must run every iteration must place such code
in a function decorated with `@static_graph`.
This decorator can be applied to either a function or a method (usually
of a `FunctionNode`). There are no required arguments, and so a user can
apply it to "side effect" code to cause an operation to be executed each
iteration. The more usual use case is where the core framework code
will apply it to the all of (and only) the functions
that actually perform the computations needed to compute the forward
and backward passes.
The simplest usage is when we would like to force a particular
user-defined function to run each iteration. For example, such a function
might increment a counter, check conditions, and possibly print
information to the console. In this use, it is only required to add
this decorator to the function definition and then call it during
the first iteration from the context of the static chain's
`__call__()` method.
Passing and returing arrays:
If the function needs an array as an input argument that was
used elsewhere in the static schedule, it must appear as an
item in list of arrays that is supplied in the `inputs` keyword
argument. An example would be the typical case where one layer
in the network produces output activations `y` which are then
used as the input of the next layer. If the corresponding
`FunctionNode` instances wrap their computaions using this decorator,
this will result in multiple functions that operate on `y`.
The following constraints specify how
such arrays should be passed into and returned from a function
that uses this decorator.
If the function will return results in one or more arrays, there are
two options:
1. Write the results in-place into preallocated arrays that are
supplied in a list in the `outputs` keyword argument.
2. Dynamically allocate the result array(s) inside the function
and return them inside a tuple.
When two schedule functions
"func_A" and "func_B" operate on the same array `x`,
`x` must explicitly appear as an input argument and/or output
of both functions. For
example, it would be an error to have schedule function "func_A"
return a dynamically allocated array `x` and then have schedule
function "func_B" later
read from `x` without it appearing in "func_B"'s `inputs` list.
Note that this would work during the first iteration, but during
the next iteration when "func_A" is called, it would allocate and
return a new array for `x` leading to "func_B" reading from the
stale reference to `x` from the previous iteration. This
usage is allowed in some special cases by the framework code, but
is not allowed for user-defined functions.
Performance notes:
The function should return any output arrays in-place
into pre-allocated arrays (1. above) when possible since this this allows
the scheduler to make tradeoffs
between computation efficiency and memory usage.
For example, this allows the use of a
completely static array allocations (no allocations after the first
iteration), if desired. If memory reduction is needed, the
scheduler may choose to delete the arrays in `inputs` once they are no
longer
needed in an iteration and then reallocate them again in the next
iteration just before the function is called. Note that
completely static array allocations are not possible if
any of the schedule functions return a tuple of dynamically allocated
arrays, as the existing chainer functions do.
The following optional arguments apply to the wrapped function or method.
Args (of this decorater):
func_name (str): An optional descriptive name that will be associated
with this function in the static schedule. It is intended
for debugging purposes.
Args (of the wrapped fuction):
inputs (list of ndarray): An optional keyword argument that
supplies all arrays needed as input by the function. If the
function needs an array that is used by another function
in the static schedule, it must appear in this list.
outputs (list of ndarray): An optional keyword argument that
supplies all output arrays of this function. These arrays
must already have been initialized to the correct shapes
and dtypes before the function is called. The function
must write its results in-place into these arrays. Any
output arrays that may be used inside another schedule
function must appear in this list.
Returns:
None or a tuple of ndarray: If the function dynamically
allocates its output arrays, they must be returned in a tuple
of arrays.
"""
func_name = None
zero_args = False
if len(dec_args) == 1 and not dec_kwargs and callable(dec_args[0]):
callable_arg = dec_args[0]
zero_args = True
elif dec_kwargs:
if 'func_name' in dec_kwargs:
func_name = dec_kwargs['func_name']
def wrap(func):
def wrapped_func(*args, **kwargs):
# Save arguments, function, and results pointers/references
# to the schedule list:
# If trace mode is on, add to schedule.
schedule_function = chainer.config.schedule_func
if schedule_function is not None:
assert chainer.config.use_static_graph
# Note: 'ret = func(*args, **kwargs)' is called inside
# the following method.
ret = schedule_function.append_function(func, args, kwargs,
func_name=func_name)
# Add the schedule function as an attribute of the
# FunctionNode instance (or more generally, to any class)
# that contains the wrapped function as a method
if args:
instance = args[0]
if inspect.isclass(instance):
# note: this is not currently needed.
instance.schedule_func = schedule_function
else:
ret = func(*args, **kwargs)
return ret
return wrapped_func
if zero_args:
return wrap(callable_arg)
else:
return wrap
def static_forward_optimizations(func, inputs):
"""Perform checks needed for creation of a static schedule.
Check if `func` supports static graph optimizations. If not,
automatically wrap it to be compatible.
This function is called from the `FunctionNode` apply() method
in place of the original `func.forward(inputs)` call if
`chainer.config.schedule_func` is not None.
Args:
func (instance of FunctionNode):
inputs (tuple of ndarray): input arrays to `func`
Returns:
(tuple of ndarray): The outputs of the function.
"""
schedule_function = chainer.config.schedule_func
if not func._supports_static_optimizations:
if schedule_function.verbosity_level >= 2:
print('Adding automatic static graph support to '
'function: ', func)
@static_code(func_name=str(func))
def generic_static_forward(func, inputs):
"""Auto-wrap the supplied function.
func (instance of FunctionNode): The function to include in
the static schedule.
inputs (list of input arrays): The input arguments to `func`.
Returns: a tuple of output arrays.
"""
# Convert back to tuple because func.forward() requires it.
in_data = tuple(inputs)
ret = func.forward(in_data)
return ret
# Note: we convert inputs to a list because the API for
# static_code requires it.
return generic_static_forward(func, inputs=list(inputs))
return func.forward(inputs)
| 10,620
| 43.439331
| 77
|
py
|
chainer
|
chainer-master/chainer/graph_optimizations/__init__.py
|
from chainer.graph_optimizations.static_graph_utilities import static_code # NOQA
| 84
| 27.333333
| 82
|
py
|
chainer
|
chainer-master/chainer/graph_optimizations/static_graph.py
|
import sys
import weakref
import numpy as np
import chainer
from chainer.backends import cuda
import chainer.function_node
def _is_xp(x):
return isinstance(x, np.ndarray) or isinstance(x, cuda.ndarray)
class ScheduleInfo(object):
"""A callable wrapper for a function in the static schedule.
Args:
func (FunctionNode): A function in the static schedule.
args: Arguments to 'func'.
kwargs: Keyword arguments to 'func'.
inputs_hooks (list of tuples): A list of hooks that instruct how to
update the ndarray references in 'args' so that they
refer to the correct master array in 'unique_arrays'.
return_hooks (list of tuples): A list of hooks that instruct how
to update the ndarray references in 'unique_arrays' so that
they refer to the correct arrays that were dynamically
allocated and returned by 'func'. These run after
'func' is called.
unique_arrays (list of ndarray): The master list of all unique
ndarrays that appear in the static schedule.
func_name (str): An optional name of the static function. This is
the name (if any) that was used as a decorater argument to
`@static_code(func_name=name)`.
"""
def __init__(self, func, args, kwargs, inputs_hooks, outputs_hooks,
return_hooks, delete_hooks, unique_arrays, array_infos,
func_name=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.inputs_hooks = inputs_hooks
self.outputs_hooks = outputs_hooks
self.return_hooks = return_hooks
self.unique_arrays = unique_arrays
self.array_infos = array_infos
assert len(self.array_infos) == len(self.unique_arrays)
self.func_name = func_name
self.in_list = None
if self.inputs_hooks:
self.in_list = self.kwargs['inputs']
if self.outputs_hooks:
self.out_list = self.kwargs['outputs']
# Check if 'func' wraps code of a 'FunctionNode':
self.function_node = None
if self.args:
maybe_func = self.args[0]
if isinstance(maybe_func, chainer.FunctionNode):
self.function_node = maybe_func
# List of indices in unique_arrays to delete.
self.delete_hooks = delete_hooks
def run_pre_hooks(self):
"""Run hooks to set correct references.
This method is called from '__call__()'.
Process the list of hooks which will modify the array references in
the arguments list of the static function. This method must be
called before executing the static function.
The hooks specify that
each array argument points to a "master" array reference in the
unique_arrays list. If the reference in unique_arrays changes, then
we must also change the corresponding array reference in the arguments
list. The hooks specify the mapping and this method updates the
references in args to the corresponding values from unique_arrays.
"""
for hook in self.inputs_hooks:
(ind, unique_ind) = hook
self.in_list[ind] = self.unique_arrays[unique_ind]
for hook in self.outputs_hooks:
(ind, unique_ind) = hook
self.out_list[ind] = self.unique_arrays[unique_ind]
for ind in self.delete_hooks:
self.unique_arrays[ind] = None
def run_post_hooks(self, return_arrays):
"""Run post-hooks.
This method should be called after calling the static function
`self.func(*self.args)`. This method sets any array references that
appear in `self.args` to None. This is safe because the master
array reference is still kept in `self.unique_arrays`.
Also, process the list of post-hooks which will modify the array
references in
the unique_arrays list to refer to the new dynamically-allocated arrays
that were returned by 'func'.
Args:
return_arrays (list of ndarray or None): The list of arrays that
were returned by the schedule function, if not None.
"""
for hook in self.inputs_hooks:
(ind, unique_ind) = hook
self.in_list[ind] = None
for hook in self.outputs_hooks:
(ind, unique_ind) = hook
self.out_list[ind] = None
for hook in self.return_hooks:
# Update the array reference in unique_arrays to refer to the
# array in the results array.
(ret_index, unique_list_index) = hook
# Note: input/output variables to a FunctionNode that are
# retained using retain_inputs() or retain_outputs() are
# not currently explicitly used as input arguments to the
# auto-wrapped functions, and so their corresponding array
# reference could be used inside a function wrapped with
# @static_code without the array explicitly appearing in the
# 'inputs' argument. It is therefore not safe to change the
# reference of such arrays, and so for them, we must be
# sure to copy the dynamically-allocated array into the
# same array that was used in the define-by-run code and
# set 'need_copy' to True in such cases.
need_copy = self.array_infos[unique_list_index].retain
# todo: possible memory leak when need_copy False is allowed?
if need_copy:
# This must be used if the model used retain_inputs() or
# retain_outputs().
self.unique_arrays[unique_list_index][...] = \
return_arrays[ret_index]
else:
# This is preferred, when possible, since it should
# be faster than a copy to simply update the array
# reference.
self.unique_arrays[unique_list_index] = \
return_arrays[ret_index]
def __call__(self):
self.run_pre_hooks()
ret = self.func(*self.args, **self.kwargs)
self.run_post_hooks(ret)
def __repr__(self):
out = 'function: ' + str(self.func) + '\n'
out += 'name: ' + str(self.func_name) + '\n'
out += 'args: ' + str(self.args) + '\n'
out += 'kwargs: ' + str(self.args) + '\n'
return out
class ArrayInfo(object):
"""Array information needed by the scheduler.
This contains information about one array used in the naive static
schedule corresponding to the define-by-run code.
"""
def __init__(self, array):
# Weak reference to the array in the define-by-run code.
self.weak_ref = weakref.ref(array)
self.id = id(array)
# The array (normal reference). Do not create in initializer.
self.array = None
self.shape = array.shape
self.dtype = array.dtype
# either numpy or cupy
self.ndarray_module = chainer.backend.get_array_module(array)
if self.ndarray_module is cuda.cupy:
# device id, if available.
self.device = cuda.get_device_from_array(array)
else:
# numpy (cpu)
self.device = -1
# todo: save array order ('C', 'F' as well?
# It specifies the input variable corresponding
# to this array as the tuple (pass_depth, in_var_index).
self.in_var_index = None
# It specifies the output variable corresponding
# to this array as the tuple (pass_depth, out_var_index).
self.out_var_index = None
# todo: set in initializer as keyword arg?
self.dynamically_allocated = False
# If the array was returned as a dynamically allocated array
# in the define-by-run code, this specifies the location
# in the schedule as the tuple (pass_depth, sched_func_index)
# where sched_func_index is the index of the corresponding
# ScheduleInfo object in the StaticScheduleFunction's
# self.schedule_info_list
self.dynamic_allocation_index = None
self.dynamic_allocation_pass_depth = None
self.dynamic_deletion_index = None
self.dynamic_deletion_pass_depth = None
# This is the same as self.dynamic_allocation_index, but for the
# case where the array was statically allocated in the
# define-by-run code.
self.static_allocation_index = None
# If the array needs to be retained (was included in
# retain_inputs/retain_outputs),
# this will be set to True later.
self.retain = False
def was_deleted(self):
return self.weak_ref() is None
def get_new_empty_array(self):
"""Make and return a new empty ndarray.
Make and return a new empty ndarray that has the same shape,
dtype, and device as the array that was supplied to the
initializer.
"""
# todo: set device id
return self.ndarray_module.empty(self.shape, dtype=self.dtype)
def __repr__(self):
out = 'shape: {}\n'.format(self.shape)
if self.was_deleted():
out += 'Weak reference: dead\n'
else:
out += 'Weak reference: alive\n'
if self.retain:
out += 'Retained with retain_inputs()/retain_outputs().\n'
if self.dynamically_allocated:
out += 'Dynamically allocated at\n'
out += \
' pass_depth: {}\n'.format(self.dynamic_allocation_pass_depth)
out += ' sched_index: {}\n'.format(self.dynamic_allocation_index)
out += 'array id: {}'.format(self.id)
return out
class StaticScheduleFunction(chainer.function_node.FunctionNode):
"""A function that executes the static schedule of a Chain.
An instance of this class executes the static schedule of computations
that are equivalent to executing the define-by-run code of a Chain.
This class is used by the `static_graph` decorator to wrap the
define-by-run
computations of a chain into two static schedules:
- The forward schedule corresponds to the computations that are executed by
the define-by-run code of the `__call__()` method of a chain. The static
schedule corresponding to these computations can be executed by calling the
`forward()` method of this class.
- The backward schedule corresponds to the computations that are executed
by the sequence of calls to `Function.backward()` that occur during when
backpropagating the gradients through the same chain. That is, for each
`Function.forward()` that was called during the forward propagation,
there will be a corresponding call to `Function.backward()` (of the
same Function object) in the backward schedule. This backward schedule
can be executed by calling the `backward()` method of this class.
Note the intended usage of this class:
Recall that a "static chain" referes to a chain that is decorated by the
`static_graph` decorator.
During the first forward pass of a static chain, the define-by-run code
is executed. However,
for subsequent iterations, that define-by-run code is replaced by an
instance
of this function and this function will be called instead. Since the
static
schedules contained by this function perform the same computations, it is
safe (and potentially much more efficient) to simply execute the static
schedule instead
of the define-by-run code. See `static_graph` for details.
Args:
schedule_manager (ScheduleManager): The schedule manager of this
schedule instance.
in_vars (tuple of Variable): The flattened tuple of input variables
that is supplied to
`__call__()` method of the chain that this schedule corresponds to.
unique_arrays (list of ndarray): A list of all unique array references
deeply used in an StaticScheduleFunction instance. It is 'None'
for the StaticScheduleFunction that corresponds to the "forward"
schedule, but the contained StaticScheduleFunction for the
"backward" schedule should take the unique_arrays of the
"forward" schedule.
"""
def __init__(self, schedule_manager, verbosity_level=0,
enable_double_backprop=False):
# A pass depth of 0 corresponds to the schedule for the forward pass.
# A pass depth of 1 corresponds to the schedule for the backward pass.
# A pass depth of 2 corresponds to the schedule for the
# double-backward pass, and so on.
self.pass_depth = 0
self.schedule_manager = schedule_manager
# A list of ScheduleInfo objects, each of which contains one function
# in the static schedule. The order of functions in this list is
# the order they should be called in the schedule.
self.schedule_info_list = []
# A list of all unique ndarrays used in this schedule and any deeply
# contained schedules (backward, double-backward schedules).
# That is, it is shared among all pass depths.
# Note that this typically includes the ndarray attributes of the
# parameters of the chain, the input variables to the chain,
# and any intermediate arrays (activations, etc) created while
# executing the define-by-run code of the chain.
self.unique_arrays = []
# A list of UniqueArray objects, where
# each object contains information such as what the array corresponds
# to (variable, parameter.data, etc), weak or regular reference,
# whether
# it was dynamically allocated or read-only in the schedule.
# It is the same length as unique_arrays.
self.unique_array_infos = []
# Maps id(ndarray) to its position in self.unique_arrays
# This is shared by this schedule and all deeply-contained schedules.
self.array_id_to_unique_index = dict()
self.backward_schedule_func = None
self.verbosity_level = verbosity_level
self.enable_double_backprop = enable_double_backprop
self.in_vars = None
self.chain = None
self.schedule_built = False
# A list of all parameters in the model (i.e., that exist when
# build_schedule() is called.
# This is shared among all deeply-contained schedules of this schedule.
self.params_list = []
# This list contains the grad_var corresponding to each variable
# in params_list. This is needed so that we can restore any grad_var
# that is set to None by outside code.
# This is shared among all deeply-contained schedules of this schedule.
self.grad_var_list = []
# Maps an array id (of a parameter) to its location.
# id(array) -> (index_in_self.params_list, attribute_location)
self.array_id_to_param_map = dict()
# Maps an array id (of an input variable for forward()) to its
# positional index.
# id(array) -> (index in inputs argument of forward())
self.array_id_to_input_var_map = dict()
# maps a Parameter id to the parameter's index in self.params_list
self.param_id_to_index = dict()
# A list of tuples that specify the mappings from static schedule
# arrays to parameter attributes.
# These are pre-hooks that are run before running the schedule.
self.param_hooks = []
# These are post hooks that are run after executing the schedule.
# They are used to update parameter attributes from dynamically-
# allocated arrays in the schedule.
self.param_post_hooks = []
# A list of tuples that specify the mappings from static schedule
# arrays to 'data' array attributes of the output variables.
self.out_var_hooks = []
# A list of tuples that specify the mapping from static schedule
# arrays to input variable index in the "inputs" argument of forward()
# This is used to update the array references in the static schedule
# that refer to the data attribute of input variables.
self.in_var_hooks = []
self.dynamically_allocated_unique_index = set()
# Maps an index in unique_arrays to the index in the returned
# output variables, if the index corresponds to an output
# variable.
self.unique_ind_to_out_var_ind = dict()
def get_unique_index_from_array(self, array):
"""Return the array index if it exists.
Return the index of the array in self.unique_array_infos if the
array already exists in self.unique_array_info with a valid
reference. Otherwise, return None.
"""
ar_id = id(array)
if ar_id in self.array_id_to_unique_index:
# It is possible that this id is stale if a previous
# array that had the same id has already been deleted.
# So, verify that the existing array with this id is
# still alive.
unique_ind = self.array_id_to_unique_index[ar_id]
info = self.unique_array_infos[unique_ind]
assert ar_id == info.id
if info.was_deleted():
# id was stale, so remove from the dict.
del self.array_id_to_unique_index[ar_id]
return None
else:
return self.array_id_to_unique_index[ar_id]
def get_contained_schedule(self):
# Make and return the backward schedule (relative to
# this schedule).
sched = StaticScheduleFunction(self.schedule_manager,
self.verbosity_level,
self.enable_double_backprop)
sched.pass_depth = self.pass_depth + 1
sched.unique_arrays = self.unique_arrays
sched.unique_array_infos = self.unique_array_infos
sched.array_id_to_unique_index = self.array_id_to_unique_index
sched.params_list = self.params_list
sched.grad_var_list = self.grad_var_list
sched.array_id_to_param_map = self.array_id_to_param_map
sched.param_hooks = self.param_hooks
sched.param_id_to_index = self.param_id_to_index
return sched
def is_empty(self):
"""Return True if this schedule is empty.
"""
return len(self.schedule_info_list) == 0
def append_function(self, func, args, kwargs, func_name=None):
"""Append a function to the static schedule.
Append a function `func` to the static schedule. `func` can
be any function that is decorated with `@static_code` and that
was called while executing the static chain's `__call___()`
method, which contains the define-by-run code. The code
in the `@static_code` decorator will call this method to
add the function to the schedule just after it executes in
the define-by-run code as follows:
`return_arrays = func(*args, **kwargs)`
During the next iteration when the static chain switches from define-
by-run to the static schedule, a corresponding `ScheduleInfo`
object will call `func` as above, except that the scheduler might
make modifications
to some of the arrays in `kwargs` before and after the function is
called to implement various memory optimizations.
Args:
func (function or method): The function to append to the schedule.
This is a function that was decorated with `@static_code`.
args: The arguments that were originally supplied to `func` in
the define-by-run code of the static chain.
kwargs: The keyword arguments that were originally supplied to
`func` in the define-by-run code of the static chain.
func_name (str): Optional name for `func`, for debugging
purposes.
return_arrays (tuple of ndarray) or None: The value that is
returned by `func`, if any.
"""
# Check previous function in the schedule, if available.
# Check the arrays in the retained inputs/outputs and force them
# to remain statically allocated in the schedule.
# ids of any retained arrays.
retained_ids = set()
last_sched_info_ind = len(self.schedule_info_list) - 1
if last_sched_info_ind >= 0:
prev_sched_info = self.schedule_info_list[last_sched_info_ind]
if prev_sched_info.function_node is not None:
# get retained inputs/outputs.
retained_in_vars = \
prev_sched_info.function_node.get_retained_inputs()
retained_out_vars = \
prev_sched_info.function_node.get_retained_outputs()
if (retained_in_vars is not None and
retained_out_vars is not None):
retained_vars = retained_in_vars + retained_out_vars
elif retained_in_vars is not None:
retained_vars = retained_in_vars
elif retained_out_vars is not None:
retained_vars = retained_out_vars
else:
retained_vars = None
if retained_vars is not None:
for var in retained_vars:
retained_ids.add(id(var.data))
for keep_id in retained_ids:
unique_ind = self.array_id_to_unique_index[keep_id]
array_info = self.unique_array_infos[unique_ind]
array_info.retain = True
# Note: the following line is not actually needed.
# array_info.array = array_info.weak_ref()
delete_hooks = []
for unique_ind, ar_info in enumerate(self.unique_array_infos):
# todo: this is O(N^2) and maybe too slow for large graphs.
# Optimize it later.
if ar_info.was_deleted():
if ar_info.dynamic_deletion_index is None:
if self.verbosity_level >= 2:
print('Adding delete hook:')
delete_hooks.append(unique_ind)
ar_info.dynamic_deletion_index = last_sched_info_ind + 1
ar_info.dynamic_deletion_pass_depth = self.pass_depth
# Call the `@static_code`-decorated function.
ret = func(*args, **kwargs)
inputs_hooks = []
if 'inputs' in kwargs:
in_list = kwargs['inputs']
assert isinstance(in_list, list)
for ind, x in enumerate(in_list):
if _is_xp(x):
unique_ind = self.get_unique_index_from_array(x)
if unique_ind is None:
# Note: we append None here because we cannot store any
# additional reference to the array.
# Otherwise, it would
# prevent garbage collection. Note that a
# weak reference
# will be stored in the ArrayInfo below.
self.unique_arrays.append(None)
self.unique_array_infos.append(ArrayInfo(x))
unique_ind = len(self.unique_arrays) - 1
self.array_id_to_unique_index[id(x)] = unique_ind
inputs_hooks.append((ind, unique_ind))
# Now that the hook has been added, we can delete
# array reference from 'args'.
in_list[ind] = None
outputs_hooks = []
if 'outputs' in kwargs:
out_list = kwargs['outputs']
assert isinstance(out_list, list)
for ind, x in enumerate(out_list):
if _is_xp(x):
unique_ind = self.get_unique_index_from_array(x)
if unique_ind is None:
self.unique_arrays.append(x)
# todo: enable the following line instead once the
# auto-intializing hooks are added. This will further
# reduce memory usage.
# self.unique_arrays.append(None)
self.unique_array_infos.append(ArrayInfo(x))
unique_ind = len(self.unique_arrays) - 1
self.array_id_to_unique_index[id(x)] = unique_ind
outputs_hooks.append((ind, unique_ind))
# Now that the hook has been added, we can delete
# array reference from 'args'.
out_list[ind] = None
# A list of hooks (each is a tuple) that will be used to set
# correct array references in 'unique_arrays' after executing
# the static schedule function 'func'. These hooks update
# the references in 'unique_arrays' to refer to the arrays
# that were dynamically allocated in the return value of
# 'func'.
return_hooks = []
if ret is not None:
assert (isinstance(ret, list) or
isinstance(ret, tuple))
for ret_index, item in enumerate(ret):
if _is_xp(item):
# note: id might not be unique if objects have been
# garbage collected.
item_id = id(item)
unique_index = self.get_unique_index_from_array(item)
if unique_index is None:
# Note: Append None instead of 'item' to prevent an
# extra reference from being stored. Otherwise it
# would prevent garbage collection.
self.unique_arrays.append(None)
ar_info = ArrayInfo(item)
ar_info.dynamically_allocated = True
sched_info_ind = len(self.schedule_info_list)
ar_info.dynamic_allocation_index = sched_info_ind
ar_info.dynamic_allocation_pass_depth = self.pass_depth
self.unique_array_infos.append(ar_info)
unique_index = len(self.unique_arrays) - 1
self.array_id_to_unique_index[item_id] = \
unique_index
else:
# Since all of the return arrays are supposed to
# have been dynamically allocated inside 'func',
# they had better not already be in unique_arrays.
# If so, it is an error.
unique_index = self.array_id_to_unique_index[item_id]
print('the current id: ', item_id)
print('the unique_index: ', unique_index)
print('array info: ',
self.unique_array_infos[unique_ind])
raise RuntimeError('Found result array from schedule '
'function already in '
'unique_arrays!')
return_hooks.append((ret_index, unique_index))
self.dynamically_allocated_unique_index.add(unique_index)
if self.verbosity_level >= 2:
print('Adding function to static schedule: ', func)
self.schedule_info_list.append(ScheduleInfo(func, args, kwargs,
inputs_hooks,
outputs_hooks,
return_hooks,
delete_hooks,
self.unique_arrays,
self.unique_array_infos,
func_name=func_name))
return ret
def __repr__(self):
out = 'StaticSchedule:\n'
if self.pass_depth == 0:
depth = 'forward pass'
elif self.pass_depth == 1:
depth = 'backward pass'
elif self.pass_depth == 2:
depth = 'double backward pass'
else:
depth = str(self.pass_depth)
out += 'Pass depth: ' + depth + '\n'
out += 'Length of unique_arrays: ' + \
str(len(self.unique_arrays)) + '\n'
for x in self.schedule_info_list:
out += str(x)
return out
def debug_print_ref_counts(self):
print('reference counts in unique_arrays:')
for ind in range(len(self.unique_arrays)):
print('index: ', ind)
print('reference count: ',
sys.getrefcount(self.unique_arrays[ind]))
def run_param_pre_hooks(self):
"""Run parameter reference updater hooks.
It also handles the case where the 'grad' attribute
was set to 'None' by outside Chainer code.
"""
for hook in self.param_hooks:
(unique_array_index, param_attribute_location) = hook
(params_list_index, attribute_location) = param_attribute_location
if attribute_location == 'data':
# This is the corresponding parameter array, which might
# have had its reference changed to a different array or set
# to None.
self.unique_arrays[unique_array_index] = \
self.params_list[params_list_index].data
elif attribute_location == 'grad':
# This is the corresponding parameter array, which might
# have had its reference changed to a different array or set
# to None.
self.params_list[params_list_index].grad = \
self.unique_arrays[unique_array_index]
def run_param_post_hooks(self):
"""Update parameter attributes after schedule is executed.
If any dynamically-allocated arrays in the schedule correspond to
a parameter attribute, it must be updated after the schedule is
run.
"""
if self.verbosity_level >= 2:
print('run_param_post_hooks()...')
for hook in self.param_post_hooks:
(unique_array_index, param_attribute_location) = hook
(params_list_index, attribute_location) = param_attribute_location
if attribute_location == 'data':
self.params_list[params_list_index].data = \
self.unique_arrays[unique_array_index]
elif attribute_location == 'grad':
self.params_list[params_list_index].grad = \
self.unique_arrays[unique_array_index]
def run_in_var_hooks(self, input_var_arrays):
"""Run hooks to update variable array references.
Args:
input_var_arrays (tuple of ndarray): The 'data' array attributes
of the input variables to this function.
"""
for hook in self.in_var_hooks:
(unique_array_index, in_var_ind) = hook
if self.verbosity_level >= 2:
print('input var hook:')
print('unique_array_index: ', unique_array_index)
print('in_var_ind: ', in_var_ind)
print('_run_in_var_hooks(): Using this input variable array '
'for forward pass: ', input_var_arrays[in_var_ind])
self.unique_arrays[unique_array_index] = \
input_var_arrays[in_var_ind]
def debug_print_unique_arrays_info(self):
for ind, item in enumerate(self.unique_arrays):
print('--- unique_arrays ---')
print('index: {0}; id: {1}'.format(ind, id(item)))
if item is not None:
print('shape: ', item.shape)
if ind in self.unique_ind_to_out_var_ind:
out_var_ind = self.unique_ind_to_out_var_ind[ind]
print('output variable at return index: ', out_var_ind)
if ind in self.dynamically_allocated_unique_index:
print('Dynamically allocated inside schedule.')
def run_out_var_hooks(self):
"""Run hooks to update output variable array references.
"""
for hook in self.out_var_hooks:
(out_var_ind, unique_list_index) = hook
out_var = self.out_vars[out_var_ind]
out_var.data = self.unique_arrays[unique_list_index]
if self.verbosity_level >= 2:
print('StaticScheduleFunction: running output variable hook: '
'out_var_ind, unique_list_index): ', hook)
def set_out_variables(self, out_vars):
"""Set output variables.
This should be called after the define-by-run code in the
chain's `__call__()` has already run but before running the
static schedule.
Args:
out_vars (list of Variable): The (flattened) list of output
variables obtained by performing a define-by-run
forward pass (or corresponding backward pass) on the
local sub-graph corresponding to the static chain.
"""
self.out_vars = out_vars
# Create output-variable update hooks.
for var_ind, var in enumerate(out_vars):
if var is not None:
key = id(var.data)
if key in self.array_id_to_unique_index:
unique_list_index = self.array_id_to_unique_index[key]
self.out_var_hooks.append((var_ind, unique_list_index))
self.unique_ind_to_out_var_ind[unique_list_index] = var_ind
else:
raise RuntimeError('Could not find output variable in '
'unique_arrays.')
def build_schedule(self, chain, in_vars):
"""Build the static schedule.
Perform one-time post-processing on the functions and arguments
that were
previously supplied in 'append_function()' to create the static
schedule.
This method must be called after the final call of 'append_function()'
and before calling 'forward()' for the first time.
Args:
chain: The static chain that uses this scheudle.
in_vars (list of Variable): The input variables to this static
schedule. This are the input variables (each having no
creator) of the local sub-graph corresponding to the
static chain.
"""
self.chain = chain
self.in_vars = in_vars
# Iterate through all array info objects and for any arrays that
# still have a valid reference, copy into unique_arrays.
if self.verbosity_level >= 2:
print('Building schedule for pass depth: ', self.pass_depth)
for ind, info in enumerate(self.unique_array_infos):
if self.verbosity_level >= 2:
print('unique array index: ', ind)
print('array info: ', info)
if not info.was_deleted():
self.unique_arrays[ind] = info.weak_ref()
# Verify that all array references are actually unique.
unique_ids = set()
for ar in self.unique_arrays:
if ar is not None:
assert id(ar) not in unique_ids
unique_ids.add(id(ar))
for param in chain.params():
param_key = id(param)
if param_key not in self.param_id_to_index:
self.params_list.append(param)
grad_var = param.grad_var
self.grad_var_list.append(grad_var)
param_index = len(self.params_list) - 1
self.param_id_to_index[param_key] = param_index
else:
# We have seen this parameter before.
param_index = self.param_id_to_index[param_key]
grad_var = param.grad_var
self.grad_var_list[param_index] = grad_var
if param.data is not None:
key = id(param.data)
if key not in self.array_id_to_param_map:
self.array_id_to_param_map[key] = (param_index, 'data')
if param.grad is not None:
key = id(param.grad)
if key not in self.array_id_to_param_map:
self.array_id_to_param_map[key] = (param_index, 'grad')
for var_ind, in_var in enumerate(self.in_vars):
assert in_var.data is not None
key = id(in_var.data)
self.array_id_to_input_var_map[key] = var_ind
# Iterate over all arrays used in the schedule and check which ones
# correspond to parameter arrays or input variables. When a match
# is found, create a corresponding hook function. This hook will
# run just before executing the schedule and set the array
# references used in the schedule to be consistent with the
# input variables and parameters.
assert len(self.unique_arrays) > 0
for unique_array_index, ar in enumerate(self.unique_arrays):
key = id(ar)
# Create pre-run parameter hooks.
if key in self.array_id_to_param_map:
param_attribute_location = self.array_id_to_param_map[key]
param_hook = (unique_array_index, param_attribute_location)
self.param_hooks.append(param_hook)
# Create pre-run input variable hooks.
if key in self.array_id_to_input_var_map:
in_var_ind = self.array_id_to_input_var_map[key]
in_var_hook = (unique_array_index, in_var_ind)
self.in_var_hooks.append(in_var_hook)
if self.verbosity_level >= 2:
print('build_schedule(): Adding input variable hook: ',
in_var_hook)
print('For input variable: ', ar)
# Create post-run hooks for any arrays that are dynamically
# allocated inside the schedule.
if unique_array_index in self.dynamically_allocated_unique_index:
if key in self.array_id_to_param_map:
param_attribute_location = self.array_id_to_param_map[key]
param_hook = (unique_array_index, param_attribute_location)
self.param_post_hooks.append(param_hook)
if self.verbosity_level >= 2:
print('self.param_hooks: ', self.param_hooks)
self.debug_print_unique_arrays_info()
# todo: We can potentially reduce memory usage by freeing memory
# of intermediate arrays in self.unique_arrays
# once they are no longer needed in the schedule or by
# parameters.
print('end of build_schedule()')
self.schedule_built = True
def forward(self, inputs):
if self.verbosity_level >= 2:
print('Calling StaticScheduleFunction.forward()...')
# Note: This method will be invoked every iteration starting
# from the second
# iteration. That is because the corresponding define-by-run
# code runs instead
# during the first iteration.
if not self.schedule_built:
raise RuntimeError('forward() was called before '
'build_schedule()!')
self.run_param_pre_hooks()
self.run_in_var_hooks(inputs)
if self.verbosity_level >= 2:
print('Running static schedule...')
# Run each function in the static schedule.
for x in self.schedule_info_list:
x()
if self.verbosity_level >= 2:
self.debug_print_unique_arrays_info()
self.run_out_var_hooks()
self.run_param_post_hooks()
ret = []
for y in self.out_vars:
if y is None or y.data is None:
ret.append(None)
else:
# todo: add test case for an example where the following
# copy is required (evaluation mode, repeated calls of
# chain that reuse same schedule).
ret.append(y.data.copy())
return tuple(ret)
def backward(self, target_input_indexes, grad_outputs):
if self.verbosity_level >= 2:
print('Calling StaticScheduleFunction.backward()...')
# The first time this method is called, the define-by-run code is
# executed in order to create a static schedule.
self.schedule_manager.end_forward()
if self.backward_schedule_func is None:
print('Creating new backward schedule...')
# Create backward schedule and run define-by-run backward code.
self.backward_schedule_func = self.get_contained_schedule()
# Make local copies of the variables in grad_outputs.
new_grad_outputs = []
for var in grad_outputs:
# Replace each input variable with a new variable having
# the same data.
new_grad_outputs.append(chainer.Variable(var.data))
with chainer.using_config('schedule_func',
self.backward_schedule_func):
with chainer.using_config('enable_backprop', True):
for ind, var in enumerate(new_grad_outputs):
# todo: possibly don't need the following:
self.out_vars[ind].grad = new_grad_outputs[ind].data
inputs = [param for param in self.chain.params()]
for var in self.in_vars:
inputs.append(var)
# Need shorter var to avoid "line too long error"
ugh = self.enable_double_backprop
chainer.grad(self.out_vars,
inputs,
grad_outputs=new_grad_outputs,
set_grad=True,
enable_double_backprop=ugh)
# We no longer need the backward graph from self.out_vars, so
# unchain them.
# todo (vogel): enable this eventually. For now, it
# causes some needed variables to be set to None
# in some models such as CIFAR example.
# for var in self.out_vars:
# var.unchain_backward()
# Note: var.grad_var is allowed to be None below:
backward_out_vars = [var.grad_var for var in self.in_vars]
self.backward_schedule_func.set_out_variables(backward_out_vars)
for n in range(len(self.in_vars)):
self.in_vars[n] = None
if self.verbosity_level >= 2:
print('building backward schedule.')
self.backward_schedule_func.build_schedule(self.chain,
new_grad_outputs)
return self.backward_schedule_func.apply(grad_outputs)
class ScheduleManager(object):
"""A manager of static schedules for a static chain.
This is a container of the static schedules that are used by a static
chain.
Args:
minimize_cache_size (bool): If `True`, attempt to reduce memory
usage by clearing the cached schedules whenever the training
mode changes (that is, whenever `chainer.config.train` changes
value) or whenever the mini-batch size changes.
"""
def __init__(self, minimize_cache_size=True, verbosity_level=0):
# Maps a key string to a list of schedule functions.
self.schedules = dict()
self.minimize_cache_size = minimize_cache_size
self.in_use_count = dict()
self.forward_over = False
self.prev_train_config = None
self.max_in_use_train = 0
self.train_count = 0
self.verbosity_level = verbosity_level
def get_schedule(self, in_vars, enable_double_backprop=False):
"""Get a static schedule.
Return a static schedule object (that is, an instance of
``StaticScheduleFunction``) that is compatible with
the current configuration and input variables to the supplied chain.
If there is no existing schedule available, return an empty schedule
object.
During the usual "training mode" (that is, when both
`chainer.config.enable_backprop` and `chainer.config.train`
are `True`), this method will always return a distince static
schedule each time it is called within the same iteration.
It will also try to reuse
existing schedules across iterations. Therefore, any schedule that
is returned in a given iteration cannot be returned again until
the following iteration. However, if either of these flags is
'False', then this method may return the same schedule instance
multiple times within the same iteration, as long as it is
compatible with `in_vars`.
Note that in order to implement the above behavior, the schedule
manager must be informed when the current iteration has finished.
This is accomplished by calling `end_forward()` after the
iteration has finished. If a backward pass is performed, then
`end_forward()` will be automatically called. Otherwise, it
will not be called and the user will be responsible for calling
it.
Args:
in_vars (tuple of :class:`~chainer.Variable`): The input
variables to the chain.
Returns:
An instance of ``StaticScheduleFunction``.
"""
if self.forward_over:
self.forward_over = False
if self.minimize_cache_size:
if chainer.config.train != self.prev_train_config:
# Training config changed, so clear caches.
self.prev_train_config = chainer.config.train
if self.verbosity_level >= 2:
print('Clearing schedule cache...')
self.schedules.clear()
self.in_use_count.clear()
if (chainer.config.train is False or
chainer.config.enable_backprop is False):
key_str = 'test:' + \
''.join(str(x.shape) + str(x.dtype) for x in in_vars)
# If the maximum number of in-use schedules in any iteration
# during training mode was exactly 1, assume it should also
# be 1 for test mode.
if key_str in self.schedules:
sched_list = self.schedules[key_str]
sched = sched_list[0]
else:
# avoid "line too long":
vb = self.verbosity_level
edb = enable_double_backprop
sched = StaticScheduleFunction(self,
verbosity_level=vb,
enable_double_backprop=edb)
self.schedules[key_str] = [sched]
return sched
else:
key_str = 'train:' + \
''.join(str(x.shape) + str(x.dtype) for x in in_vars)
self.train_count += 1
if key_str in self.schedules:
sched_list = self.schedules[key_str]
available_index = self.in_use_count[key_str]
if available_index >= len(sched_list):
# avoid "line too long":
vb = self.verbosity_level
edb = enable_double_backprop
sched = StaticScheduleFunction(self,
verbosity_level=vb,
enable_double_backprop=edb)
sched_list.append(sched)
sched = sched_list[available_index]
self.in_use_count[key_str] = available_index + 1
else:
# avoid "line too long":
vb = self.verbosity_level
edb = enable_double_backprop
sched = StaticScheduleFunction(self,
verbosity_level=vb,
enable_double_backprop=edb)
self.schedules[key_str] = [sched]
self.in_use_count[key_str] = 1
return sched
def end_forward(self):
"""Make in-use schedules available for use in next iteration.
Set the in-use status of all schedules to "not in use" so that
they can be reused in the next iteration.
In the case that test mode is active
(`chainer.config.train` is `False`) and the static chain corresponding
to this manager was not called more than once in any iteration during
training mode, then this method will be called automatically.
"""
if not self.forward_over:
for key in self.in_use_count:
self.in_use_count[key] = 0
self.forward_over = True
if self.train_count > self.max_in_use_train:
self.max_in_use_train = self.train_count
if self.verbosity_level >= 2:
print('Maximum in-use schedules per training iteration: ',
self.max_in_use_train)
self.train_count = 0
def __repr__(self):
out = 'ScheduleManager:\n'
for key_str in self.schedules:
out += 'key string: ' + key_str
sched_list = self.schedules[key_str]
out += ' -> schedule list of length: ' + \
str(len(sched_list)) + '\n'
for sched in sched_list:
out += str(sched)
return out
def static_graph(*args, **kwargs):
"""Decorator to mark a Chain's ``__call__()`` as a static sub-graph.
This decorator marks the define-by-run code inside the `__call__()`
method of a Chain instance as corresponding to a static computation
graph or sub-graph. Such a chain will be referred to as a 'static chain'.
This allows various "static graph" optimizations to be performed, which
can result in significant speedups for some models.
When this decorator is used, the chain's define-by-run code executes
during the first iteration as usual. However, while the define-by-run
code is executing, a trace is also performed to incrementally create a
corresponding static schedule. This static schedule will only contain
the subset of the computations inside the define-by-run code that actually
needs to run every iteration. Specifically, this will contain the code
inside any functions called that were annotated with the `@static_code`
decorator, which will include all Chainer built-in functions, as well as
any user-defined functions that use `@static_code`. Then, starting
from the second iteration, when the static chain is called, its
static schedule code will be executed instead of its define-by-run code.
However, the user must also be careful of the following:
- The user is responsible for applying this decorator correctly. The
framework
does not check that the define-by-run code corresponds to a static
graph. The graph can be different between training and
evaluation mode (such as when dropout and/or batch normalization are
used), but should otherwise be static.
- When `chainer.config.enable_backprop` is enabled, if a backward pass
is not performed each iteration, then the user code must call a method
`chain.schedule_manager.end_forward()`on the static chain each iteration.
- Static graphs allow tradeoffs between computation and memory usage.
For example, the `minimize_cache_size` argument will typically result in
higher memory usage when set to `False` because all cached schedules
are retained.
- When this feature is enabled, only the Chainer function and/or link
calls inside the chain's `__call__()` method will be included in the
static schedule by default. An other code that the user puts in
`__call__()`, such as a print statement or code to increment a counter
for example, will not automatically get added. We will refer to such
code other than Chainer function/link calls as "side-effect" code.
Since side-effect code does not get included in the static schedule
by default, this means that it will only every execute once, during
the first iteration. There is a way to force side-effect code to be
included in the static schedule, however: the user can wrapp such
code inside a function that is decorated with
`@static_code` to ensure that it gets added to the static schedule.
For an example of this, refer to the documentation.
- This feature is experimental and advanced optimizations such
as kernel fusion and various memory optimizations are not implemented
yet.
Usage:
This decorator should only be applied
to define-by-run code that actually corresponds to a static subgraph.
Refer to the documenation for additional details and examples of
correct usage.
This decorator should be applied to each of the largest static
subgraphs in the model; it can also be applied to a static subgraph
that is not the largest subgraph, but that could result in reduced
performance.
It is not currently allowed to
mark a chain as static if it is contained within
another chain that is also marked as being static.
For example, suppose a
static graph `A` contains a static sub-graph `B`. Then, only the chain
corresponding to `A` should be marked as static and the chain
corresponding
to `B` should not be marked as static.
The behavior of a static chain depends on the training mode flag,
`chainer.config.train`. If it is `True`, then a static chain that is
called multiple times will try to use a distinct static schedule object
(that is, call a distinct instance of a FunctionNode that implements
that static schedule) on each call. The same schedule instance cannot
be reused until the forward pass has completed, which is signaled by
performing a backward pass through the model. It is therefore important
that the backward pass be performed after each forward pass during
training. Since this is usually the case, most usages of static chain
will not required any modifications to existing code other than applying
this decorator. However, if you would like to perform multiple forward
passes during training before performing a backward pass, then you
must call `chain.schedule_manager.end_forward()` after the end
of each forward pass.
If test mode is active (`chainer.config.train` is `False`) then it
is not necessary to inform the chain at the end of each forward pass
because in test mode, a static chain always attempts to reuse
existing static schedule objects. The same static schedule can be reused
during a single forward pass, because it is not necessary to compute
gradients.
It is also possible to disable static optimzations while in test mode by
setting the decorator argument `force_test_define_by_run=True`.
Note: If either 'chainer.config.enable_backprop' or 'chainer.config.train'
is set to 'False', then cached static schedules will be reused when
possible to reduce memory usage.
Double-backprop:
Double-backpropagation is not enabled by default. It can be enabled by
supplying the keyword argument ``enable_double_backprop=True``
to this decorator. Note: this feature has not been tested yet.
Restrictions on input arguments and return values of a static chain:
Recall that unlike a function, there is no restrictions on the
arguments to a chain. However, there currently are some restrictions
when a static chain is used. Specifically, the arguments to a static
chain must consist of a variable, list or tuple. In the case of a list
or tuple, the elements are required to be an instance of variable,
list, or tuple. There can be an arbitrary number of nested lists/
tuples. No other object types are allowed.
In addition, keyword arguments are not allowed.
The return value of a static chain must be a
variable, list, or tuple in which each element of the list or
tuple is also a variable, list, or tuple.
This decorator can be supplied with the following optional keyword
arguments. This is an experimental feature, and the API and arguments
might change
Args:
force_test_define_by_run (bool): If `True`, disable static graph
optimizations during test mode (that is, when
`chainer.config.train` is False). This may be needed in order
for some existing RNN links such as LSTM to work correctly,
since some existing links do not correspond to a static graph
in some cases.
The default is `False`.
minimize_cache_size (bool): If `True`, minimize the number of cached
static schedules in order to reduce memory usage. For example,
if the mini-batch size changes or the training mode changes,
the schedules will need to be recomputed, but memory is also
saved by not retaining all cached schedules.
The default value is `True`.
verbosity_level (int): Depending on the value, print additional
information:
0: Warnings only. (the default value)
1: Show only information that is collected during the first
iteration and when a new static schedule is created.
2: Detailed debugging information, possibly showing new
information every iteration.
enable_double_backprop (bool): If `True`, enable double-backprop.
The default value is `False` (not enabled).
Returns:
Wrapped ``__call__()`` method with static chain support.
"""
# todo: consider to allow nested use of this decorator.
force_test_define_by_run = False
# todo: enable eventually
minimize_cache_size = False
verbosity_level = 0
enable_double_backprop = False
zero_args = False
if len(args) == 1 and not kwargs and callable(args[0]):
callable_arg = args[0]
zero_args = True
elif kwargs:
if 'force_test_define_by_run' in kwargs:
force_test_define_by_run = kwargs['force_test_define_by_run']
if 'minimize_cache_size' in kwargs:
minimize_cache_size = kwargs['minimize_cache_size']
if 'verbosity_level' in kwargs:
verbosity_level = kwargs['verbosity_level']
if 'enable_double_backprop' in kwargs:
enable_double_backprop = kwargs['enable_double_backprop']
def wrap(func):
def wrapped_func(*inner_args, **inner_kwargs):
# The static subgraph optimization feature can be turned off using
# a configuration, in which case this decorator merely calls the
# wrapped function without introducing any side effects.
if not chainer.config.use_static_graph:
return func(*inner_args, **inner_kwargs)
if verbosity_level >= 2:
print('Calling static chain...')
chain = inner_args[0]
# The arguments to `__call__()` of the static chain.
# These could consist of any combination of nested lists and/or
# tuples of variables or arrays.
chain_args = inner_args[1:]
if chainer.config.train is False and force_test_define_by_run:
return func(*inner_args, **inner_kwargs)
chain_args_flat, in_unflatten_inds, __ = _flatten_args(chain_args)
# Since it is allowed for in_vars to be either variables or arrays,
# we force to variables.
flat_vars = []
for x in chain_args_flat:
# This assumes x is either a variable or ndarray.
# todo: check this and handle case when it is not.
if not isinstance(x, chainer.Variable):
flat_vars.append(chainer.Variable(x))
else:
flat_vars.append(x)
flat_vars = tuple(flat_vars)
if not hasattr(chain, 'schedule_manager'):
chain.schedule_manager = ScheduleManager(
minimize_cache_size=minimize_cache_size,
verbosity_level=verbosity_level)
schedule_manager = chain.schedule_manager
# To prevent "line too long" error
edb = enable_double_backprop
chain.static_schedule = \
schedule_manager.get_schedule(flat_vars,
enable_double_backprop=edb)
if verbosity_level >= 2:
print('Current schedule manager info: ', schedule_manager)
if not chain.static_schedule.is_empty():
# Call the static schedule code.
if verbosity_level >= 2:
print('This is the 2nd or greater iteration. Calling '
'the existing static schedule...')
chain.static_schedule.debug_print_ref_counts()
out_vars_flat = chain.static_schedule.apply(flat_vars)
out_vars = _unflatten_args(out_vars_flat,
chain._out_vars_unflatten_inds)
else:
# This is the first iteration. Calling the define-by-run code.
assert isinstance(chain, chainer.Chain)
if verbosity_level >= 2:
print('This is the first iteration. Calling the '
'define-by-run code.: ', func)
# First check that this chain is not called from inside another
# static chain because it is not allowed.
if chainer.config.schedule_func is not None:
raise RuntimeError('Not allowed to nest static chains: ',
chain)
new_args = []
new_args.append(chain)
new_flat_vars = []
for var in flat_vars:
# Replace each input variable with a new variable having
# the same data. This is needed so that the chain-local
# computation graph will be rooted at the input variables.
new_flat_vars.append(chainer.Variable(var.data))
unflat_in_args = _unflatten_args_as_list(new_flat_vars,
in_unflatten_inds)
for item in unflat_in_args:
new_args.append(item)
inner_args = tuple(new_args)
with chainer.using_config('schedule_func',
chain.static_schedule):
# Execute the chain's call() method. As the define-by-run
# code executes, the static schedule is constructed.
out_vars = func(*inner_args, **inner_kwargs)
out_vars_flat_dbr, chain._out_vars_unflatten_inds, __ = \
_flatten_args(out_vars)
sched_out_vars = list(out_vars_flat_dbr)
chain.static_schedule.set_out_variables(sched_out_vars)
# Mark the static schedule as complete.
chain.static_schedule.build_schedule(chain, new_flat_vars)
# Now that the static schedule is available, call it using the
# flattened input variables. This will cause the
# static schedule function node to be included in the
# computational graph.
out_vars_flat = chain.static_schedule.apply(flat_vars)
out_vars = _unflatten_args(out_vars_flat,
chain._out_vars_unflatten_inds)
if verbosity_level >= 2:
print('Returing from 1st call of the static chain.')
return out_vars
return wrapped_func
if zero_args:
return wrap(callable_arg)
else:
return wrap
def _flatten_args(xs):
"""Flatten the input into a tuple of variables.
In the typical case, `xs` is a tuple or list of objects where each
object is either a variable, list, or tuple. In the case where it is
a list of tuple, the objects in the list or tuple could also be either
a variable, list or tuple. Although the non-list and non-tuple items
are typically an instance of variable, any object other than list or
tuple is allowed.
This function simply flattens the hierarchical lists/tuples so that all
objects that are deeply contained in `xs` that are non-list and non-tuple
will be returned in a single tuple.
Args:
xs:
Returns:
The flattened tuple, allong with the indecies and count so that the
items can be unflattened later (i.e., by calling `_unflatten_args()`.
fixme: does not work if xs is a variable only.
"""
inds = []
ys = []
i = 0
if not isinstance(xs, (list, tuple)):
inds.append(('s', ))
return (xs,), inds, 0
for x in xs:
if isinstance(x, (list, tuple)):
x, sub_inds, total = _flatten_args(x, )
inds.append(('i', i, i+total, sub_inds))
i += total
else:
x = [x]
inds.append(('f', i))
i += 1
ys.extend([y for y in x])
return tuple(ys), inds, i
# todo: this only outputs tuples of tuples. Any list in the original input
# will be converted to a tuple, changing the types of the input arguments
# to the static chain.
def _unflatten_args(xs, inds):
ys = []
for ind in inds:
code = ind[0]
if code == 's':
return xs[0]
elif code == 'i':
i_start, i_end, sub_inds = ind[1:]
y = _unflatten_args(xs[i_start:i_end], sub_inds)
else:
i = ind[1]
y = xs[i]
ys.append(y)
return tuple(ys)
def _unflatten_args_as_list(xs, inds):
ys = []
for ind in inds:
code = ind[0]
if code == 's':
return xs[0]
elif code == 'i':
i_start, i_end, sub_inds = ind[1:]
y = _unflatten_args(xs[i_start:i_end], sub_inds)
else:
i = ind[1]
y = xs[i]
ys.append(y)
return ys
| 67,103
| 44.617947
| 79
|
py
|
chainer
|
chainer-master/chainer/datasets/text_dataset.py
|
import io
import sys
import threading
import six
from chainer.dataset import dataset_mixin
class TextDataset(dataset_mixin.DatasetMixin):
"""Dataset of a line-oriented text file.
This dataset reads each line of text file(s) on every call of the
:meth:`__getitem__` operator.
Positions of line boundaries are cached so that you can quickliy
random access the text file by the line number.
.. note::
Cache will be built in the constructor.
You can pickle and unpickle the dataset to reuse the cache, but in
that case you are responsible to guarantee that files are not
modified after the cache has built.
Args:
paths (str or list of str):
Path to the text file(s).
If it is a string, this dataset reads a line from the text file
and emits it as :class:`str`.
If it is a list of string, this dataset reads lines from each
text file and emits it as a tuple of :class:`str`. In this case,
number of lines in all files must be the same.
encoding (str or list of str):
Name of the encoding used to decode the file.
See the description in :func:`open` for the supported options and
how it works.
When reading from multiple text files, you can also pass a list of
:class:`str` to use different encoding for each file.
errors (str or list of str):
String that specifies how decoding errors are to be handled.
See the description in :func:`open` for the supported options and
how it works.
When reading from multiple text files, you can also pass a list of
:class:`str` to use different error handling policy for each file.
newline (str or list of str):
Controls how universal newlines mode works.
See the description in :func:`open` for the supported options and
how it works.
When reading from multiple text files, you can also pass a list of
:class:`str` to use different mode for each file.
filter_func (callable):
Function to filter each line of the text file.
It should be a function that takes number of arguments equals to
the number of files. Arguments are lines loaded from each file.
The filter function must return True to accept the line, or
return False to skip the line.
"""
def __init__(
self, paths, encoding=None, errors=None, newline=None,
filter_func=None):
if isinstance(paths, six.string_types):
paths = [paths]
elif not paths:
raise ValueError('at least one text file must be specified')
if isinstance(encoding, six.string_types) or encoding is None:
encoding = [encoding] * len(paths)
if isinstance(errors, six.string_types) or errors is None:
errors = [errors] * len(paths)
if isinstance(newline, six.string_types) or newline is None:
newline = [newline] * len(paths)
if not (len(paths) == len(encoding) == len(errors) == len(newline)):
raise ValueError(
'length of each option must match with the number of '
'text files to read')
self._paths = paths
self._encoding = encoding
self._errors = errors
self._newline = newline
self._fps = None
self._open()
# Line number is 0-origin.
# `lines` is a list of line numbers not filtered; if no filter_func is
# given, it is range(linenum)).
# `bounds` is a list of cursor positions of line boundaries for each
# file, i.e. i-th line of k-th file starts at `bounds[k][i]`.
linenum = 0
lines = []
bounds = tuple([[0] for _ in self._fps])
while True:
data = [fp.readline() for fp in self._fps]
if not all(data): # any of files reached EOF
if any(data): # not all files reached EOF
raise ValueError(
'number of lines in files does not match')
break
for i, fp in enumerate(self._fps):
bounds[i].append(fp.tell())
if filter_func is not None and filter_func(*data):
lines.append(linenum)
linenum += 1
if filter_func is None:
lines = six.moves.range(linenum)
self._bounds = bounds
self._lines = lines
self._lock = threading.Lock()
def __getstate__(self):
state = self.__dict__.copy()
del state['_fps']
del state['_lock']
return state
def __setstate__(self, state):
self.__dict__ = state
self._open()
self._lock = threading.Lock()
def __len__(self):
return len(self._lines)
def _open(self):
self._fps = [
io.open(
path,
mode='rt',
encoding=encoding,
errors=errors,
newline=newline,
) for path, encoding, errors, newline in
six.moves.zip(self._paths, self._encoding, self._errors,
self._newline)
]
def close(self):
"""Manually closes all text files.
In most cases, you do not have to call this method, because files will
automatically be closed after TextDataset instance goes out of scope.
"""
exc = None
for fp in self._fps:
try:
fp.close()
except Exception:
exc = sys.exc_info()
if exc is not None:
six.reraise(*exc)
def get_example(self, idx):
if idx < 0 or len(self._lines) <= idx:
raise IndexError
linenum = self._lines[idx]
self._lock.acquire()
try:
for k, fp in enumerate(self._fps):
fp.seek(self._bounds[k][linenum])
lines = [fp.readline() for fp in self._fps]
if len(lines) == 1:
return lines[0]
return tuple(lines)
finally:
self._lock.release()
| 6,272
| 35.260116
| 78
|
py
|
chainer
|
chainer-master/chainer/datasets/kuzushiji_mnist.py
|
import os
import numpy
import chainer
from chainer.dataset import download
from chainer.datasets._mnist_helper import make_npz
from chainer.datasets._mnist_helper import preprocess_mnist
_kuzushiji_mnist_labels = [('o', u'\u304A'), ('ki', u'\u304D'),
('su', u'\u3059'), ('tsu', u'\u3064'),
('na', u'\u306A'), ('ha', u'\u306F'),
('ma', u'\u307E'), ('ya', u'\u3084'),
('re', u'\u308C'), ('wo', u'\u3092')]
def get_kuzushiji_mnist_labels():
"""Provides a list of labels for the Kuzushiji-MNIST dataset.
Returns:
List of labels in the form of tuples. Each tuple contains the
character name in romaji as a string value and the unicode codepoint
for the character.
"""
return _kuzushiji_mnist_labels
def get_kuzushiji_mnist(withlabel=True, ndim=1, scale=1., dtype=None,
label_dtype=numpy.int32, rgb_format=False):
"""Gets the Kuzushiji-MNIST dataset.
`Kuzushiji-MNIST (KMNIST) <http://codh.rois.ac.jp/kmnist/>`_ is a set of
hand-written Japanese characters represented by grey-scale 28x28 images.
In the original images, each pixel is represented by one-byte unsigned
integer. This function scales the pixels to floating point values in the
interval ``[0, scale]``.
This function returns the training set and the test set of the official
KMNIST dataset. If ``withlabel`` is ``True``, each dataset consists of
tuples of images and labels, otherwise it only consists of images.
Args:
withlabel (bool): If ``True``, it returns datasets with labels. In this
case, each example is a tuple of an image and a label. Otherwise,
the datasets only contain images.
ndim (int): Number of dimensions of each image. The shape of each image
is determined depending on ``ndim`` as follows:
- ``ndim == 1``: the shape is ``(784,)``
- ``ndim == 2``: the shape is ``(28, 28)``
- ``ndim == 3``: the shape is ``(1, 28, 28)``
scale (float): Pixel value scale. If it is 1 (default), pixels are
scaled to the interval ``[0, 1]``.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
label_dtype: Data type of the labels.
rgb_format (bool): if ``ndim == 3`` and ``rgb_format`` is ``True``, the
image will be converted to rgb format by duplicating the channels
so the image shape is (3, 28, 28). Default is ``False``.
Returns:
A tuple of two datasets. If ``withlabel`` is ``True``, both datasets
are :class:`~chainer.datasets.TupleDataset` instances. Otherwise, both
datasets are arrays of images.
"""
dtype = chainer.get_dtype(dtype)
train_raw = _retrieve_kuzushiji_mnist_training()
train = preprocess_mnist(train_raw, withlabel, ndim, scale, dtype,
label_dtype, rgb_format)
test_raw = _retrieve_kuzushiji_mnist_test()
test = preprocess_mnist(test_raw, withlabel, ndim, scale, dtype,
label_dtype, rgb_format)
return train, test
def _retrieve_kuzushiji_mnist_training():
base_url = 'http://codh.rois.ac.jp/'
urls = [base_url + 'kmnist/dataset/kmnist/train-images-idx3-ubyte.gz',
base_url + 'kmnist/dataset/kmnist/train-labels-idx1-ubyte.gz']
return _retrieve_kuzushiji_mnist('train.npz', urls)
def _retrieve_kuzushiji_mnist_test():
base_url = 'http://codh.rois.ac.jp/'
urls = [base_url + 'kmnist/dataset/kmnist/t10k-images-idx3-ubyte.gz',
base_url + 'kmnist/dataset/kmnist/t10k-labels-idx1-ubyte.gz']
return _retrieve_kuzushiji_mnist('test.npz', urls)
def _retrieve_kuzushiji_mnist(name, urls):
root = download.get_dataset_directory('pfnet/chainer/kuzushiji_mnist')
path = os.path.join(root, name)
return download.cache_or_load_file(
path, lambda path: make_npz(path, urls), numpy.load)
| 4,102
| 40.444444
| 79
|
py
|
chainer
|
chainer-master/chainer/datasets/concatenated_dataset.py
|
from chainer.dataset import dataset_mixin
class ConcatenatedDataset(dataset_mixin.DatasetMixin):
"""Dataset which concatenates some base datasets.
This dataset wraps some base datasets and works as a concatenated dataset.
For example, if a base dataset with 10 samples and
another base dataset with 20 samples are given, this dataset works as
a dataset which has 30 samples.
Args:
datasets: The underlying datasets. Each dataset has to support
:meth:`__len__` and :meth:`__getitem__`.
"""
def __init__(self, *datasets):
self._datasets = datasets
def __len__(self):
return sum(len(dataset) for dataset in self._datasets)
def get_example(self, i):
if i < 0:
raise IndexError
for dataset in self._datasets:
if i < len(dataset):
return dataset[i]
i -= len(dataset)
raise IndexError
| 939
| 27.484848
| 78
|
py
|
chainer
|
chainer-master/chainer/datasets/image_dataset.py
|
import bisect
import io
import os
import threading
import zipfile
import numpy
try:
from PIL import Image
available = True
except ImportError as e:
available = False
_import_error = e
import six
import chainer
from chainer.dataset import dataset_mixin
def _read_image_as_array(path, dtype):
f = Image.open(path)
try:
image = numpy.asarray(f, dtype=dtype)
finally:
# Only pillow >= 3.0 has 'close' method
if hasattr(f, 'close'):
f.close()
return image
def _postprocess_image(image):
if image.ndim == 2:
# image is greyscale
image = image[..., None]
return image.transpose(2, 0, 1)
class ImageDataset(dataset_mixin.DatasetMixin):
"""Dataset of images built from a list of paths to image files.
This dataset reads an external image file on every call of the
:meth:`__getitem__` operator. The paths to the image to retrieve is given
as either a list of strings or a text file that contains paths in distinct
lines.
Each image is automatically converted to arrays of shape
``channels, height, width``, where ``channels`` represents the number of
channels in each pixel (e.g., 1 for grey-scale images, and 3 for RGB-color
images).
.. note::
**This dataset requires the Pillow package being installed.** In order
to use this dataset, install Pillow (e.g. by using the command ``pip
install Pillow``). Be careful to prepare appropriate libraries for image
formats you want to use (e.g. libpng for PNG images, and libjpeg for JPG
images).
.. warning::
**You are responsible for preprocessing the images before feeding them
to a model.** For example, if your dataset contains both RGB and
grayscale images, make sure that you convert them to the same format.
Otherwise you will get errors because the input dimensions are different
for RGB and grayscale images.
Args:
paths (str or list of strs): If it is a string, it is a path to a text
file that contains paths to images in distinct lines. If it is a
list of paths, the ``i``-th element represents the path to the
``i``-th image. In both cases, each path is a relative one from the
root path given by another argument.
root (str): Root directory to retrieve images from.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
"""
def __init__(self, paths, root='.', dtype=None):
_check_pillow_availability()
if isinstance(paths, six.string_types):
with open(paths) as paths_file:
paths = [path.strip() for path in paths_file]
self._paths = paths
self._root = root
self._dtype = chainer.get_dtype(dtype)
def __len__(self):
return len(self._paths)
def get_example(self, i):
path = os.path.join(self._root, self._paths[i])
image = _read_image_as_array(path, self._dtype)
return _postprocess_image(image)
class LabeledImageDataset(dataset_mixin.DatasetMixin):
"""Dataset of image and label pairs built from a list of paths and labels.
This dataset reads an external image file like :class:`ImageDataset`. The
difference from :class:`ImageDataset` is that this dataset also returns a
label integer. The paths and labels are given as either a list of pairs or
a text file contains paths/labels pairs in distinct lines. In the latter
case, each path and corresponding label are separated by white spaces. This
format is same as one used in Caffe.
.. note::
**This dataset requires the Pillow package being installed.** In order
to use this dataset, install Pillow (e.g. by using the command ``pip
install Pillow``). Be careful to prepare appropriate libraries for image
formats you want to use (e.g. libpng for PNG images, and libjpeg for JPG
images).
.. warning::
**You are responsible for preprocessing the images before feeding them
to a model.** For example, if your dataset contains both RGB and
grayscale images, make sure that you convert them to the same format.
Otherwise you will get errors because the input dimensions are different
for RGB and grayscale images.
Args:
pairs (str or list of tuples): If it is a string, it is a path to a
text file that contains paths to images in distinct lines. If it is
a list of pairs, the ``i``-th element represents a pair of the path
to the ``i``-th image and the corresponding label. In both cases,
each path is a relative one from the root path given by another
argument.
root (str): Root directory to retrieve images from.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
label_dtype: Data type of the labels.
"""
def __init__(self, pairs, root='.', dtype=None, label_dtype=numpy.int32):
_check_pillow_availability()
if isinstance(pairs, six.string_types):
pairs_path = pairs
with open(pairs_path) as pairs_file:
pairs = []
for i, line in enumerate(pairs_file):
pair = line.strip().split()
if len(pair) != 2:
raise ValueError(
'invalid format at line {} in file {}'.format(
i, pairs_path))
pairs.append((pair[0], int(pair[1])))
self._pairs = pairs
self._root = root
self._dtype = chainer.get_dtype(dtype)
self._label_dtype = label_dtype
def __len__(self):
return len(self._pairs)
def get_example(self, i):
path, int_label = self._pairs[i]
full_path = os.path.join(self._root, path)
image = _read_image_as_array(full_path, self._dtype)
label = numpy.array(int_label, dtype=self._label_dtype)
return _postprocess_image(image), label
class LabeledZippedImageDataset(dataset_mixin.DatasetMixin):
"""Dataset of zipped image and label pairs.
This dataset is zip version of :class:`LabeledImageDataset`. It
takes a zipfile like :class:`ZippedImageDataset`. The label file
shall contain lines like text file used in
:class:`LabeledImageDataset`, but a filename in each line of the
label file shall match with a file in the zip archive.
Args:
zipfilename (str): Path to a zipfile with images
labelfilename (str): Path to a label file. ``i``-th line shall
contain a filename and an integer label that corresponds
to the ``i``-th sample. A filename in the label file shall
match with a filename in the zip file given with
`zipfilename`.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
label_dtype: Data type of the labels.
"""
def __init__(self, zipfilename, labelfilename, dtype=None,
label_dtype=numpy.int32):
_check_pillow_availability()
pairs = []
with open(labelfilename) as pairs_file:
for i, line in enumerate(pairs_file):
pair = line.strip().split()
if len(pair) != 2:
raise ValueError(
'invalid format at line {} in file {}'.format(
i, pairs_file))
pairs.append((pair[0], int(pair[1])))
self._pairs = pairs
self._label_dtype = label_dtype
self._zipfile = ZippedImageDataset(zipfilename, dtype=dtype)
def __len__(self):
return len(self._pairs)
def get_example(self, i):
path, int_label = self._pairs[i]
label = numpy.array(int_label, dtype=self._label_dtype)
return self._zipfile.get_example(path), label
class MultiZippedImageDataset(dataset_mixin.DatasetMixin):
"""Dataset of images built from a list of paths to zip files.
This dataset reads an external image file in given zipfiles. The
zipfiles shall contain only image files.
This shall be able to replace ImageDataset and works better on NFS
and other networked file systems. The user shall find good balance
between zipfile size and number of zipfiles (e.g. granularity)
Args:
zipfilenames (list of strings): List of zipped archive filename.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
"""
def __init__(self, zipfilenames, dtype=None):
self._zfs = [ZippedImageDataset(fn, dtype) for fn in zipfilenames]
self._zpaths_accumlens = [0]
zplen = 0
for zf in self._zfs:
zplen += len(zf)
self._zpaths_accumlens.append(zplen)
def __len__(self):
return self._zpaths_accumlens[-1]
def get_example(self, i):
tgt = bisect.bisect(self._zpaths_accumlens, i) - 1
lidx = i - self._zpaths_accumlens[tgt]
return self._zfs[tgt].get_example(lidx)
class ZippedImageDataset(dataset_mixin.DatasetMixin):
"""Dataset of images built from a zip file.
This dataset reads an external image file in the given
zipfile. The zipfile shall contain only image files.
This shall be able to replace ImageDataset and works better on NFS
and other networked file systems. If zipfile becomes too large you
may consider ``MultiZippedImageDataset`` as a handy alternative.
Known issue: pickle and unpickle on same process may cause race
condition on ZipFile. Pickle of this class is expected to be sent
to different processess via ChainerMN.
Args:
zipfilename (str): a string to point zipfile path
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
"""
def __init__(self, zipfilename, dtype=None):
self._zipfilename = zipfilename
self._zf = zipfile.ZipFile(zipfilename)
self._zf_pid = os.getpid()
self._dtype = chainer.get_dtype(dtype)
self._paths = [x for x in self._zf.namelist() if not x.endswith('/')]
self._lock = threading.Lock()
def __len__(self):
return len(self._paths)
def __getstate__(self):
d = self.__dict__.copy()
d['_zf'] = None
d['_lock'] = None
return d
def __setstate__(self, state):
self.__dict__ = state
self._lock = threading.Lock()
def get_example(self, i_or_filename):
# LabeledZippedImageDataset needs file with filename in zip archive
if isinstance(i_or_filename, six.integer_types):
zfn = self._paths[i_or_filename]
else:
zfn = i_or_filename
# PIL may seek() on the file -- zipfile won't support it
with self._lock:
if self._zf is None or self._zf_pid != os.getpid():
self._zf_pid = os.getpid()
self._zf = zipfile.ZipFile(self._zipfilename)
image_file_mem = self._zf.read(zfn)
image_file = io.BytesIO(image_file_mem)
image = _read_image_as_array(image_file, self._dtype)
return _postprocess_image(image)
def _check_pillow_availability():
if not available:
raise ImportError('PIL cannot be loaded. Install Pillow!\n'
'The actual import error is as follows:\n' +
str(_import_error))
| 11,769
| 36.845659
| 79
|
py
|
chainer
|
chainer-master/chainer/datasets/fashion_mnist.py
|
import os
import numpy
import chainer
from chainer.dataset import download
from chainer.datasets._mnist_helper import make_npz
from chainer.datasets._mnist_helper import preprocess_mnist
_fashion_mnist_labels = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
def get_fashion_mnist_labels():
"""Provide a list of the string value names of the labels.
Returns:
List of string values of the image labels.
"""
return list(_fashion_mnist_labels)
def get_fashion_mnist(withlabel=True, ndim=1, scale=1., dtype=None,
label_dtype=numpy.int32, rgb_format=False):
"""Gets the Fashion-MNIST dataset.
`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist/>`_ is a
set of fashion articles represented by grey-scale 28x28 images. In the
original images, each pixel is represented by one-byte unsigned integer.
This function scales the pixels to floating point values in the interval
``[0, scale]``.
This function returns the training set and the test set of the official
Fashion-MNIST dataset. If ``withlabel`` is ``True``, each dataset consists
of tuples of images and labels, otherwise it only consists of images.
Args:
withlabel (bool): If ``True``, it returns datasets with labels. In this
case, each example is a tuple of an image and a label. Otherwise,
the datasets only contain images.
ndim (int): Number of dimensions of each image. The shape of each image
is determined depending on ``ndim`` as follows:
- ``ndim == 1``: the shape is ``(784,)``
- ``ndim == 2``: the shape is ``(28, 28)``
- ``ndim == 3``: the shape is ``(1, 28, 28)``
scale (float): Pixel value scale. If it is 1 (default), pixels are
scaled to the interval ``[0, 1]``.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
label_dtype: Data type of the labels.
rgb_format (bool): if ``ndim == 3`` and ``rgb_format`` is ``True``, the
image will be converted to rgb format by duplicating the channels
so the image shape is (3, 28, 28). Default is ``False``.
Returns:
A tuple of two datasets. If ``withlabel`` is ``True``, both datasets
are :class:`~chainer.datasets.TupleDataset` instances. Otherwise, both
datasets are arrays of images.
"""
train_raw = _retrieve_fashion_mnist_training()
dtype = chainer.get_dtype(dtype)
train = preprocess_mnist(train_raw, withlabel, ndim, scale, dtype,
label_dtype, rgb_format)
test_raw = _retrieve_fashion_mnist_test()
test = preprocess_mnist(test_raw, withlabel, ndim, scale, dtype,
label_dtype, rgb_format)
return train, test
def _retrieve_fashion_mnist_training():
base_url = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
urls = [base_url + 'train-images-idx3-ubyte.gz',
base_url + 'train-labels-idx1-ubyte.gz']
return _retrieve_fashion_mnist('train.npz', urls)
def _retrieve_fashion_mnist_test():
base_url = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
urls = [base_url + 't10k-images-idx3-ubyte.gz',
base_url + 't10k-labels-idx1-ubyte.gz']
return _retrieve_fashion_mnist('test.npz', urls)
def _retrieve_fashion_mnist(name, urls):
root = download.get_dataset_directory('pfnet/chainer/fashion-mnist')
path = os.path.join(root, name)
return download.cache_or_load_file(
path, lambda path: make_npz(path, urls), numpy.load)
| 3,771
| 38.705263
| 79
|
py
|
chainer
|
chainer-master/chainer/datasets/tuple_dataset.py
|
import six
class TupleDataset(object):
"""Dataset of tuples from multiple equal-length datasets.
A ``TupleDataset`` combines multiple equal-length datasets into a single
dataset of tuples. The ``i``-th tuple contains the ``i``-th example from
each of the argument datasets, in the same order that they were supplied.
Recall that in Chainer, a dataset is defined as an iterable that supports
both ``__getitem__`` and ``__len__``. The ``__getitem__`` method should
support indexing by both an integer and a slice.
As an example, consider creating a ``TupleDataset`` from two argument
datasets ``d1 = [8, 0, 5, 1]`` and ``d2 = [3, 1, 7, 4]`` as
``tuple_dataset = TupleDataset(d1, d2)``. The ``tuple_dataset`` will
then contain the examples ``(8, 3), (0, 1), (5, 7), (1, 4)``. Note that
this behavior is similar to that of the built-in :func:`zip` function.
Args:
datasets: Underlying datasets that will be aggregated. Each dataset
must be an iterable that implements ``__getitem__`` and
``__len__``. The ``j``-th dataset will be used for the ``j``-th
item of each example tuple. All datasets must have the same length.
"""
def __init__(self, *datasets):
if not datasets:
raise ValueError('no datasets are given')
length = len(datasets[0])
for i, dataset in enumerate(datasets):
if len(dataset) != length:
raise ValueError(
'dataset of the index {} has a wrong length'.format(i))
self._datasets = datasets
self._length = length
def __getitem__(self, index):
batches = [dataset[index] for dataset in self._datasets]
if isinstance(index, slice):
length = len(batches[0])
return [tuple([batch[i] for batch in batches])
for i in six.moves.range(length)]
else:
return tuple(batches)
def __len__(self):
return self._length
| 2,024
| 37.942308
| 79
|
py
|
chainer
|
chainer-master/chainer/datasets/dict_dataset.py
|
import six
class DictDataset(object):
"""Dataset of a dictionary of datasets.
It combines multiple datasets into one dataset. Each example is represented
by a dictionary mapping a key to an example of the corresponding dataset.
Args:
datasets: Underlying datasets. The keys are used as the keys of each
example. All datasets must have the same length.
"""
def __init__(self, **datasets):
if not datasets:
raise ValueError('no datasets are given')
length = None
for key, dataset in six.iteritems(datasets):
if length is None:
length = len(dataset)
elif length != len(dataset):
raise ValueError(
'dataset length conflicts at "{}"'.format(key))
self._datasets = datasets
self._length = length
def __getitem__(self, index):
batches = {key: dataset[index]
for key, dataset in six.iteritems(self._datasets)}
if isinstance(index, slice):
length = len(six.next(six.itervalues(batches)))
return [{key: batch[i] for key, batch in six.iteritems(batches)}
for i in six.moves.range(length)]
else:
return batches
def __len__(self):
return self._length
| 1,334
| 30.785714
| 79
|
py
|
chainer
|
chainer-master/chainer/datasets/transform_dataset.py
|
from chainer.dataset import dataset_mixin
class TransformDataset(dataset_mixin.DatasetMixin):
"""Dataset that indexes the base dataset and transforms the data.
This dataset wraps the base dataset by modifying the behavior of the base
dataset's :meth:`__getitem__`. Arrays returned by :meth:`__getitem__` of
the base dataset with an integer as an argument are transformed by the
given function :obj:`transform`.
Also, :meth:`__len__` returns the integer returned by the base dataset's
:meth:`__len__`.
The function :obj:`transform` takes, as an argument, :obj:`in_data`, which
is the output of the base dataset's :meth:`__getitem__`, and returns
the transformed arrays as output. Please see the following example. Since
:obj:`in_data` directly refers to the item in the dataset, take care that
:obj:`transform` not modify it. For example, note that the line
`img = img - 0.5` bellow is correct since it makes a copy of `img`.
However, it would be incorrect to use `img -= 0.5` since that would update
the contents of the item in the dataset in place, corrupting it.
>>> from chainer.datasets import get_mnist
>>> from chainer.datasets import TransformDataset
>>> dataset, _ = get_mnist()
>>> def transform(in_data):
... img, label = in_data
... img = img - 0.5 # scale to [-0.5, 0.5]
... return img, label
>>> dataset = TransformDataset(dataset, transform)
Args:
dataset: The underlying dataset. The index of this dataset corresponds
to the index of the base dataset. This object needs to support
functions :meth:`__getitem__` and :meth:`__len__` as described
above.
transform (callable): A function that is called to transform values
returned by the underlying dataset's :meth:`__getitem__`.
"""
def __init__(self, dataset, transform):
self._dataset = dataset
self._transform = transform
def __len__(self):
return len(self._dataset)
def get_example(self, i):
in_data = self._dataset[i]
return self._transform(in_data)
| 2,158
| 39.735849
| 78
|
py
|
chainer
|
chainer-master/chainer/datasets/__init__.py
|
# import classes and functions
from chainer.datasets.cifar import get_cifar10 # NOQA
from chainer.datasets.cifar import get_cifar100 # NOQA
from chainer.datasets.concatenated_dataset import ConcatenatedDataset # NOQA
from chainer.datasets.dict_dataset import DictDataset # NOQA
from chainer.datasets.fashion_mnist import get_fashion_mnist # NOQA
from chainer.datasets.fashion_mnist import get_fashion_mnist_labels # NOQA
from chainer.datasets.image_dataset import ImageDataset # NOQA
from chainer.datasets.image_dataset import LabeledImageDataset # NOQA
from chainer.datasets.image_dataset import LabeledZippedImageDataset # NOQA
from chainer.datasets.image_dataset import MultiZippedImageDataset # NOQA
from chainer.datasets.image_dataset import ZippedImageDataset # NOQA
from chainer.datasets.kuzushiji_mnist import get_kuzushiji_mnist # NOQA
from chainer.datasets.kuzushiji_mnist import get_kuzushiji_mnist_labels # NOQA
from chainer.datasets.mnist import get_mnist # NOQA
from chainer.datasets.pickle_dataset import open_pickle_dataset # NOQA
from chainer.datasets.pickle_dataset import open_pickle_dataset_writer # NOQA
from chainer.datasets.pickle_dataset import PickleDataset # NOQA
from chainer.datasets.pickle_dataset import PickleDatasetWriter # NOQA
from chainer.datasets.ptb import get_ptb_words # NOQA
from chainer.datasets.ptb import get_ptb_words_vocabulary # NOQA
from chainer.datasets.sub_dataset import get_cross_validation_datasets # NOQA
from chainer.datasets.sub_dataset import get_cross_validation_datasets_random # NOQA
from chainer.datasets.sub_dataset import split_dataset # NOQA
from chainer.datasets.sub_dataset import split_dataset_n # NOQA
from chainer.datasets.sub_dataset import split_dataset_n_random # NOQA
from chainer.datasets.sub_dataset import split_dataset_random # NOQA
from chainer.datasets.sub_dataset import SubDataset # NOQA
from chainer.datasets.svhn import get_svhn # NOQA
from chainer.datasets.text_dataset import TextDataset # NOQA
from chainer.datasets.transform_dataset import TransformDataset # NOQA
from chainer.datasets.tuple_dataset import TupleDataset # NOQA
| 2,144
| 64
| 85
|
py
|
chainer
|
chainer-master/chainer/datasets/svhn.py
|
import os
import numpy
try:
from scipy import io
_scipy_available = True
except Exception as e:
_error = e
_scipy_available = False
import chainer
from chainer.dataset import download
from chainer.datasets import tuple_dataset
def get_svhn(withlabel=True, scale=1., dtype=None, label_dtype=numpy.int32,
add_extra=False):
"""Gets the SVHN dataset.
`The Street View House Numbers (SVHN) dataset
<http://ufldl.stanford.edu/housenumbers/>`_
is a dataset similar to MNIST but composed of cropped images of house
numbers.
The functionality of this function is identical to the counterpart for the
MNIST dataset (:func:`~chainer.datasets.get_mnist`),
with the exception that there is no ``ndim`` argument.
.. note::
`SciPy <https://www.scipy.org/>`_ is required to use this feature.
Args:
withlabel (bool): If ``True``, it returns datasets with labels. In this
case, each example is a tuple of an image and a label. Otherwise,
the datasets only contain images.
scale (float): Pixel value scale. If it is 1 (default), pixels are
scaled to the interval ``[0, 1]``.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
label_dtype: Data type of the labels.
add_extra: Use extra training set.
Returns:
If ``add_extra`` is ``False``, a tuple of two datasets (train and
test). Otherwise, a tuple of three datasets (train, test, and extra).
If ``withlabel`` is ``True``, all datasets are
:class:`~chainer.datasets.TupleDataset` instances. Otherwise, both
datasets are arrays of images.
"""
if not _scipy_available:
raise RuntimeError('SciPy is not available: %s' % _error)
train_raw = _retrieve_svhn_training()
dtype = chainer.get_dtype(dtype)
train = _preprocess_svhn(train_raw, withlabel, scale, dtype,
label_dtype)
test_raw = _retrieve_svhn_test()
test = _preprocess_svhn(test_raw, withlabel, scale, dtype,
label_dtype)
if add_extra:
extra_raw = _retrieve_svhn_extra()
extra = _preprocess_svhn(extra_raw, withlabel, scale, dtype,
label_dtype)
return train, test, extra
else:
return train, test
def _preprocess_svhn(raw, withlabel, scale, image_dtype, label_dtype):
images = raw['x'].transpose(3, 2, 0, 1)
images = images.astype(image_dtype)
images *= scale / 255.
labels = raw['y'].astype(label_dtype).flatten()
# labels go from 1-10, with the digit "0" having label 10.
# Set "0" to be label 0 to restore expected ordering
labels[labels == 10] = 0
if withlabel:
return tuple_dataset.TupleDataset(images, labels)
else:
return images
def _retrieve_svhn_training():
url = 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat'
return _retrieve_svhn('train.npz', url)
def _retrieve_svhn_test():
url = 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat'
return _retrieve_svhn('test.npz', url)
def _retrieve_svhn_extra():
url = 'http://ufldl.stanford.edu/housenumbers/extra_32x32.mat'
return _retrieve_svhn('extra.npz', url)
def _retrieve_svhn(name, url):
root = download.get_dataset_directory('pfnet/chainer/svhn')
path = os.path.join(root, name)
return download.cache_or_load_file(
path, lambda path: _make_npz(path, url), numpy.load)
def _make_npz(path, url):
_path = download.cached_download(url)
raw = io.loadmat(_path)
images = raw['X'].astype(numpy.uint8)
labels = raw['y'].astype(numpy.uint8)
numpy.savez_compressed(path, x=images, y=labels)
return {'x': images, 'y': labels}
| 3,857
| 32.258621
| 79
|
py
|
chainer
|
chainer-master/chainer/datasets/_mnist_helper.py
|
import gzip
import struct
import numpy
import six
from chainer.dataset import download
from chainer.datasets import tuple_dataset
def make_npz(path, urls):
x_url, y_url = urls
x_path = download.cached_download(x_url)
y_path = download.cached_download(y_url)
with gzip.open(x_path, 'rb') as fx, gzip.open(y_path, 'rb') as fy:
fx.read(4)
fy.read(4)
N, = struct.unpack('>i', fx.read(4))
if N != struct.unpack('>i', fy.read(4))[0]:
raise RuntimeError('wrong pair of MNIST images and labels')
fx.read(8)
x = numpy.empty((N, 784), dtype=numpy.uint8)
y = numpy.empty(N, dtype=numpy.uint8)
for i in six.moves.range(N):
y[i] = ord(fy.read(1))
for j in six.moves.range(784):
x[i, j] = ord(fx.read(1))
numpy.savez_compressed(path, x=x, y=y)
return {'x': x, 'y': y}
def preprocess_mnist(raw, withlabel, ndim, scale, image_dtype, label_dtype,
rgb_format):
images = raw['x']
if ndim == 2:
images = images.reshape(-1, 28, 28)
elif ndim == 3:
images = images.reshape(-1, 1, 28, 28)
if rgb_format:
images = numpy.broadcast_to(images,
(len(images), 3) + images.shape[2:])
elif ndim != 1:
raise ValueError('invalid ndim for MNIST dataset')
images = images.astype(image_dtype)
images *= scale / 255.
if withlabel:
labels = raw['y'].astype(label_dtype)
return tuple_dataset.TupleDataset(images, labels)
else:
return images
| 1,614
| 27.839286
| 76
|
py
|
chainer
|
chainer-master/chainer/datasets/cifar.py
|
import os
import sys
import tarfile
import numpy
import six.moves.cPickle as pickle
import chainer
from chainer.dataset import download
from chainer.datasets import tuple_dataset
def get_cifar10(withlabel=True, ndim=3, scale=1., dtype=None):
"""Gets the CIFAR-10 dataset.
`CIFAR-10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ is a set of small
natural images. Each example is an RGB color image of size 32x32,
classified into 10 groups. In the original images, each component of pixels
is represented by one-byte unsigned integer. This function scales the
components to floating point values in the interval ``[0, scale]``.
This function returns the training set and the test set of the official
CIFAR-10 dataset. If ``withlabel`` is ``True``, each dataset consists of
tuples of images and labels, otherwise it only consists of images.
Args:
withlabel (bool): If ``True``, it returns datasets with labels. In this
case, each example is a tuple of an image and a label. Otherwise,
the datasets only contain images.
ndim (int): Number of dimensions of each image. The shape of each image
is determined depending on ndim as follows:
- ``ndim == 1``: the shape is ``(3072,)``
- ``ndim == 3``: the shape is ``(3, 32, 32)``
scale (float): Pixel value scale. If it is 1 (default), pixels are
scaled to the interval ``[0, 1]``.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
Returns:
A tuple of two datasets. If ``withlabel`` is ``True``, both datasets
are :class:`~chainer.datasets.TupleDataset` instances. Otherwise, both
datasets are arrays of images.
"""
return _get_cifar('cifar-10', withlabel, ndim, scale, dtype)
def get_cifar100(withlabel=True, ndim=3, scale=1., dtype=None):
"""Gets the CIFAR-100 dataset.
`CIFAR-100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ is a set of
small natural images. Each example is an RGB color image of size 32x32,
classified into 100 groups. In the original images, each component
pixels is represented by one-byte unsigned integer. This function scales
the components to floating point values in the interval ``[0, scale]``.
This function returns the training set and the test set of the official
CIFAR-100 dataset. If ``withlabel`` is ``True``, each dataset consists of
tuples of images and labels, otherwise it only consists of images.
Args:
withlabel (bool): If ``True``, it returns datasets with labels. In this
case, each example is a tuple of an image and a label. Otherwise,
the datasets only contain images.
ndim (int): Number of dimensions of each image. The shape of each image
is determined depending on ndim as follows:
- ``ndim == 1``: the shape is ``(3072,)``
- ``ndim == 3``: the shape is ``(3, 32, 32)``
scale (float): Pixel value scale. If it is 1 (default), pixels are
scaled to the interval ``[0, 1]``.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
Returns:
A tuple of two datasets. If ``withlabel`` is ``True``, both
are :class:`~chainer.datasets.TupleDataset` instances. Otherwise, both
datasets are arrays of images.
"""
return _get_cifar('cifar-100', withlabel, ndim, scale, dtype)
def _get_cifar(name, withlabel, ndim, scale, dtype):
root = download.get_dataset_directory(os.path.join('pfnet', 'chainer',
'cifar'))
npz_path = os.path.join(root, '{}.npz'.format(name))
url = 'https://www.cs.toronto.edu/~kriz/{}-python.tar.gz'.format(name)
def creator(path):
archive_path = download.cached_download(url)
if name == 'cifar-10':
train_x = numpy.empty((5, 10000, 3072), dtype=numpy.uint8)
train_y = numpy.empty((5, 10000), dtype=numpy.uint8)
test_y = numpy.empty(10000, dtype=numpy.uint8)
dir_name = '{}-batches-py'.format(name)
with tarfile.open(archive_path, 'r:gz') as archive:
# training set
for i in range(5):
file_name = '{}/data_batch_{}'.format(dir_name, i + 1)
d = _pickle_load(archive.extractfile(file_name))
train_x[i] = d['data']
train_y[i] = d['labels']
# test set
file_name = '{}/test_batch'.format(dir_name)
d = _pickle_load(archive.extractfile(file_name))
test_x = d['data']
test_y[...] = d['labels'] # copy to array
train_x = train_x.reshape(50000, 3072)
train_y = train_y.reshape(50000)
else:
# name == 'cifar-100'
def load(archive, file_name):
d = _pickle_load(archive.extractfile(file_name))
x = d['data'].reshape((-1, 3072))
y = numpy.array(d['fine_labels'], dtype=numpy.uint8)
return x, y
with tarfile.open(archive_path, 'r:gz') as archive:
train_x, train_y = load(archive, 'cifar-100-python/train')
test_x, test_y = load(archive, 'cifar-100-python/test')
numpy.savez_compressed(path, train_x=train_x, train_y=train_y,
test_x=test_x, test_y=test_y)
return {'train_x': train_x, 'train_y': train_y,
'test_x': test_x, 'test_y': test_y}
raw = download.cache_or_load_file(npz_path, creator, numpy.load)
train = _preprocess_cifar(raw['train_x'], raw['train_y'], withlabel,
ndim, scale, dtype)
test = _preprocess_cifar(raw['test_x'], raw['test_y'], withlabel, ndim,
scale, dtype)
return train, test
def _preprocess_cifar(images, labels, withlabel, ndim, scale, dtype):
if ndim == 1:
images = images.reshape(-1, 3072)
elif ndim == 3:
images = images.reshape(-1, 3, 32, 32)
else:
raise ValueError('invalid ndim for CIFAR dataset')
dtype = chainer.get_dtype(dtype)
images = images.astype(dtype)
images *= scale / 255.
if withlabel:
labels = labels.astype(numpy.int32)
return tuple_dataset.TupleDataset(images, labels)
else:
return images
def _pickle_load(f):
if sys.version_info > (3, ):
# python3
return pickle.load(f, encoding='latin-1')
else:
# python2
return pickle.load(f)
| 6,772
| 39.076923
| 79
|
py
|
chainer
|
chainer-master/chainer/datasets/ptb.py
|
import os
import numpy
from chainer.dataset import download
def get_ptb_words():
"""Gets the Penn Tree Bank dataset as long word sequences.
`Penn Tree Bank <https://catalog.ldc.upenn.edu/LDC99T42>`_
is originally a corpus of English sentences with linguistic structure
annotations. This function uses a variant distributed at
`https://github.com/wojzaremba/lstm <https://github.com/wojzaremba/lstm>`_,
which omits the annotation and splits the dataset into three parts:
training, validation, and test.
This function returns the training, validation, and test sets, each of
which is represented as a long array of word IDs. All sentences in the
dataset are concatenated by End-of-Sentence mark '<eos>', which is treated
as one of the vocabulary.
Returns:
tuple of numpy.ndarray: Int32 vectors of word IDs.
.. Seealso::
Use :func:`get_ptb_words_vocabulary` to get the mapping between the
words and word IDs.
"""
train = _retrieve_ptb_words('train.npz', _train_url)
valid = _retrieve_ptb_words('valid.npz', _valid_url)
test = _retrieve_ptb_words('test.npz', _test_url)
return train, valid, test
def get_ptb_words_vocabulary():
"""Gets the Penn Tree Bank word vocabulary.
Returns:
dict: Dictionary that maps words to corresponding word IDs. The IDs are
used in the Penn Tree Bank long sequence datasets.
.. seealso::
See :func:`get_ptb_words` for the actual datasets.
"""
return _retrieve_word_vocabulary()
_train_url = 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt' # NOQA
_valid_url = 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt' # NOQA
_test_url = 'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt' # NOQA
def _retrieve_ptb_words(name, url):
def creator(path):
vocab = _retrieve_word_vocabulary()
words = _load_words(url)
x = numpy.empty(len(words), dtype=numpy.int32)
for i, word in enumerate(words):
x[i] = vocab[word]
numpy.savez_compressed(path, x=x)
return {'x': x}
root = download.get_dataset_directory('pfnet/chainer/ptb')
path = os.path.join(root, name)
loaded = download.cache_or_load_file(path, creator, numpy.load)
return loaded['x']
def _retrieve_word_vocabulary():
def creator(path):
words = _load_words(_train_url)
vocab = {}
index = 0
with open(path, 'w') as f:
for word in words:
if word not in vocab:
vocab[word] = index
index += 1
f.write(word + '\n')
return vocab
def loader(path):
vocab = {}
with open(path) as f:
for i, word in enumerate(f):
vocab[word.strip()] = i
return vocab
root = download.get_dataset_directory('pfnet/chainer/ptb')
path = os.path.join(root, 'vocab.txt')
return download.cache_or_load_file(path, creator, loader)
def _load_words(url):
path = download.cached_download(url)
words = []
with open(path) as words_file:
for line in words_file:
if line:
words += line.strip().split()
words.append('<eos>')
return words
| 3,359
| 30.111111
| 98
|
py
|
chainer
|
chainer-master/chainer/datasets/mnist.py
|
import os
import numpy
import chainer
from chainer.dataset import download
from chainer.datasets._mnist_helper import make_npz
from chainer.datasets._mnist_helper import preprocess_mnist
def get_mnist(withlabel=True, ndim=1, scale=1., dtype=None,
label_dtype=numpy.int32, rgb_format=False):
"""Gets the MNIST dataset.
`MNIST <http://yann.lecun.com/exdb/mnist/>`_ is a set of hand-written
digits represented by grey-scale 28x28 images. In the original images, each
pixel is represented by one-byte unsigned integer. This function
scales the pixels to floating point values in the interval ``[0, scale]``.
This function returns the training set and the test set of the official
MNIST dataset. If ``withlabel`` is ``True``, each dataset consists of
tuples of images and labels, otherwise it only consists of images.
Args:
withlabel (bool): If ``True``, it returns datasets with labels. In this
case, each example is a tuple of an image and a label. Otherwise,
the datasets only contain images.
ndim (int): Number of dimensions of each image. The shape of each image
is determined depending on ``ndim`` as follows:
- ``ndim == 1``: the shape is ``(784,)``
- ``ndim == 2``: the shape is ``(28, 28)``
- ``ndim == 3``: the shape is ``(1, 28, 28)``
scale (float): Pixel value scale. If it is 1 (default), pixels are
scaled to the interval ``[0, 1]``.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
label_dtype: Data type of the labels.
rgb_format (bool): if ``ndim == 3`` and ``rgb_format`` is ``True``, the
image will be converted to rgb format by duplicating the channels
so the image shape is (3, 28, 28). Default is ``False``.
Returns:
A tuple of two datasets. If ``withlabel`` is ``True``, both datasets
are :class:`~chainer.datasets.TupleDataset` instances. Otherwise, both
datasets are arrays of images.
"""
dtype = chainer.get_dtype(dtype)
train_raw = _retrieve_mnist_training()
train = preprocess_mnist(train_raw, withlabel, ndim, scale, dtype,
label_dtype, rgb_format)
test_raw = _retrieve_mnist_test()
test = preprocess_mnist(test_raw, withlabel, ndim, scale, dtype,
label_dtype, rgb_format)
return train, test
def _retrieve_mnist_training():
urls = ['http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz']
return _retrieve_mnist('train.npz', urls)
def _retrieve_mnist_test():
urls = ['http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz']
return _retrieve_mnist('test.npz', urls)
def _retrieve_mnist(name, urls):
root = download.get_dataset_directory('pfnet/chainer/mnist')
path = os.path.join(root, name)
return download.cache_or_load_file(
path, lambda path: make_npz(path, urls), numpy.load)
| 3,215
| 40.766234
| 79
|
py
|
chainer
|
chainer-master/chainer/datasets/pickle_dataset.py
|
import io
import multiprocessing.util
import threading
import six
import six.moves.cPickle as pickle
from chainer.dataset import dataset_mixin
class PickleDatasetWriter(object):
"""Writer class that makes PickleDataset.
To make :class:`PickleDataset`, a user needs to prepare data using
:class:`PickleDatasetWriter`.
Args:
writer: File like object that supports ``write`` and ``tell`` methods.
protocol (int): Valid protocol for :mod:`pickle`.
.. seealso: chainer.datasets.PickleDataset
"""
def __init__(self, writer, protocol=pickle.HIGHEST_PROTOCOL):
self._positions = []
self._writer = writer
self._protocol = protocol
def close(self):
self._writer.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def write(self, x):
position = self._writer.tell()
pickle.dump(x, self._writer, protocol=self._protocol)
self._positions.append(position)
def flush(self):
if hasattr(self._writer, 'flush'):
self._writer.flush()
class PickleDataset(dataset_mixin.DatasetMixin):
"""Dataset stored in a storage using pickle.
:mod:`pickle` is the default serialization library of Python.
This dataset stores any objects in a storage using :mod:`pickle`.
Even when a user wants to use a large dataset, this dataset can stores all
data in a large storage like HDD and each data can be randomly accessible.
.. testsetup::
import tempfile
fs, path_to_data = tempfile.mkstemp()
>>> with chainer.datasets.open_pickle_dataset_writer(path_to_data) as w:
... w.write((1, 2.0, 'hello'))
... w.write((2, 3.0, 'good-bye'))
...
>>> with chainer.datasets.open_pickle_dataset(path_to_data) as dataset:
... print(dataset[1])
...
(2, 3.0, 'good-bye')
.. testcleanup::
import os
os.close(fs)
Args:
reader: File like object. `reader` must support random access.
"""
def __init__(self, reader):
# Only py3 supports `seekable` method
if six.PY3 and not reader.seekable():
raise ValueError('reader must support random access')
self._reader = reader
self._positions = []
reader.seek(0)
while True:
position = reader.tell()
try:
pickle.load(reader)
except EOFError:
break
self._positions.append(position)
self._lock = threading.RLock()
self._register_hook()
def _register_hook(self):
# TODO: Avoid using undocumented feature
multiprocessing.util.register_after_fork(
self, PickleDataset._after_fork)
def _after_fork(self):
if callable(getattr(self._reader, 'after_fork', None)):
self._reader.after_fork()
def __getstate__(self):
state = self.__dict__.copy()
del state['_lock']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._lock = threading.RLock()
self._register_hook()
def close(self):
"""Closes a file reader.
After a user calls this method, the dataset will no longer be
accessible..
"""
with self._lock:
self._reader.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __len__(self):
return len(self._positions)
def get_example(self, index):
with self._lock:
self._reader.seek(self._positions[index])
return pickle.load(self._reader)
class _FileReader(io.RawIOBase):
"""A file-like class implemented `after_fork()` hook
The method :meth:`after_fork` is called in the child process after forking,
and it closes and reopens the file object to avoid race condition caused by
open file description.
See: https://www.securecoding.cert.org/confluence/x/ZQG7AQ
"""
def __init__(self, path):
super(_FileReader, self).__init__()
self._path = path
self._fp = None
self._open()
def _open(self):
self._fp = open(self._path, 'rb')
def after_fork(self):
"""Reopens the file to avoid race condition."""
self.close()
self._open()
def __getstate__(self):
state = self.__dict__.copy()
del state['_fp']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._open()
# file-like interface
def flush(self):
self._fp.flush()
def close(self):
self._fp.close()
def fileno(self):
return self._fp.fileno()
def seekable(self):
return self._fp.seekable()
def seek(self, offset, whence=io.SEEK_SET):
return self._fp.seek(offset, whence)
def tell(self):
return self._fp.tell()
def readinto(self, b):
return self._fp.readinto(b)
def open_pickle_dataset(path):
"""Opens a dataset stored in a given path.
This is a helper function to open :class:`PickleDataset`. It opens a given
file in binary mode, and creates a :class:`PickleDataset` instance.
This method does not close the opened file. A user needs to call
:func:`PickleDataset.close` or use `with`:
.. code-block:: python
with chainer.datasets.open_pickle_dataset('path') as dataset:
pass # use dataset
Args:
path (str): Path to a dataset.
Returns:
chainer.datasets.PickleDataset: Opened dataset.
.. seealso: chainer.datasets.PickleDataset
"""
reader = _FileReader(path)
return PickleDataset(reader)
def open_pickle_dataset_writer(path, protocol=pickle.HIGHEST_PROTOCOL):
"""Opens a writer to make a PickleDataset.
This is a helper function to open :class:`PickleDatasetWriter`. It opens a
given file in binary mode and creates a :class:`PickleDatasetWriter`
instance.
This method does not close the opened file. A user needs to call
:func:`PickleDatasetWriter.close` or use `with`:
.. code-block:: python
with chainer.datasets.open_pickle_dataset_writer('path') as writer:
pass # use writer
Args:
path (str): Path to a dataset.
protocol (int): Valid protocol for :mod:`pickle`.
Returns:
chainer.datasets.PickleDatasetWriter: Opened writer.
.. seealso: chainer.datasets.PickleDataset
"""
writer = open(path, 'wb')
return PickleDatasetWriter(writer, protocol=protocol)
| 6,689
| 25.338583
| 79
|
py
|
chainer
|
chainer-master/chainer/datasets/sub_dataset.py
|
import numpy
import six
import warnings
from chainer.dataset import dataset_mixin
class SubDataset(dataset_mixin.DatasetMixin):
"""Subset of a base dataset.
SubDataset defines a subset of a given base dataset. The subset is defined
as an interval of indexes, optionally with a given permutation.
If ``order`` is given, then the ``i``-th example of this dataset is the
``order[start + i]``-th example of the base dataset, where ``i`` is a
non-negative integer. If ``order`` is not given, then the ``i``-th example
of this dataset is the ``start + i``-th example of the base dataset.
Negative indexing is also allowed: in this case, the term ``start + i`` is
replaced by ``finish + i``.
SubDataset is often used to split a dataset into training and validation
subsets. The training set is used for training, while the validation set is
used to track the generalization performance, i.e. how the learned model
works well on unseen data. We can tune hyperparameters (e.g. number of
hidden units, weight initializers, learning rate, etc.) by comparing the
validation performance. Note that we often use another set called test set
to measure the quality of the tuned hyperparameter, which can be made by
nesting multiple SubDatasets.
There are two ways to make training-validation splits. One is a single
split, where the dataset is split just into two subsets. It can be done by
:func:`split_dataset` or :func:`split_dataset_random`. The other one is a
:math:`k`-fold cross validation, in which the dataset is divided into
:math:`k` subsets, and :math:`k` different splits are generated using each
of the :math:`k` subsets as a validation set and the rest as a training
set. It can be done by :func:`get_cross_validation_datasets`.
Args:
dataset: Base dataset.
start (int): The first index in the interval.
finish (int): The next-to-the-last index in the interval.
order (sequence of ints): Permutation of indexes in the base dataset.
If this is ``None``, then the ascending order of indexes is used.
"""
def __init__(self, dataset, start, finish, order=None):
if start < 0 or finish > len(dataset):
raise ValueError('subset overruns the base dataset.')
self._dataset = dataset
self._start = start
self._finish = finish
self._size = finish - start
if order is not None and len(order) != len(dataset):
msg = ('order option must have the same length as the base '
'dataset: len(order) = {} while len(dataset) = {}'.format(
len(order), len(dataset)))
raise ValueError(msg)
self._order = order
def __len__(self):
return self._size
def get_example(self, i):
if i >= 0:
if i >= self._size:
raise IndexError('dataset index out of range')
index = self._start + i
else:
if i < -self._size:
raise IndexError('dataset index out of range')
index = self._finish + i
if self._order is not None:
index = self._order[index]
return self._dataset[index]
def split_dataset(dataset, split_at, order=None):
"""Splits a dataset into two subsets.
This function creates two instances of :class:`SubDataset`. These instances
do not share any examples, and they together cover all examples of the
original dataset.
Args:
dataset: Dataset to split.
split_at (int): Position at which the base dataset is split.
order (sequence of ints): Permutation of indexes in the base dataset.
See the documentation of :class:`SubDataset` for details.
Returns:
tuple: Two :class:`SubDataset` objects. The first subset represents the
examples of indexes ``order[:split_at]`` while the second subset
represents the examples of indexes ``order[split_at:]``.
"""
n_examples = len(dataset)
if not isinstance(split_at, (six.integer_types, numpy.integer)):
raise TypeError('split_at must be int, got {} instead'
.format(type(split_at)))
if split_at < 0:
raise ValueError('split_at must be non-negative')
if split_at > n_examples:
raise ValueError('split_at exceeds the dataset size')
subset1 = SubDataset(dataset, 0, split_at, order)
subset2 = SubDataset(dataset, split_at, n_examples, order)
return subset1, subset2
def split_dataset_random(dataset, first_size, seed=None):
"""Splits a dataset into two subsets randomly.
This function creates two instances of :class:`SubDataset`. These instances
do not share any examples, and they together cover all examples of the
original dataset. The split is automatically done randomly.
Args:
dataset: Dataset to split.
first_size (int): Size of the first subset.
seed (int): Seed the generator used for the permutation of indexes.
If an integer being convertible to 32 bit unsigned integers is
specified, it is guaranteed that each sample
in the given dataset always belongs to a specific subset.
If ``None``, the permutation is changed randomly.
Returns:
tuple: Two :class:`SubDataset` objects. The first subset contains
``first_size`` examples randomly chosen from the dataset without
replacement, and the second subset contains the rest of the
dataset.
"""
order = numpy.random.RandomState(seed).permutation(len(dataset))
return split_dataset(dataset, first_size, order)
def split_dataset_n(dataset, n, order=None):
"""Splits a dataset into ``n`` subsets.
Args:
dataset: Dataset to split.
n(int): The number of subsets.
order (sequence of ints): Permutation of indexes in the base dataset.
See the documentation of :class:`SubDataset` for details.
Returns:
list: List of ``n`` :class:`SubDataset` objects.
Each subset contains the examples of indexes
``order[i * (len(dataset) // n):(i + 1) * (len(dataset) // n)]``
.
"""
n_examples = len(dataset)
sub_size = n_examples // n
return [SubDataset(dataset, sub_size * i, sub_size * (i + 1), order)
for i in six.moves.range(n)]
def split_dataset_n_random(dataset, n, seed=None):
"""Splits a dataset into ``n`` subsets randomly.
Args:
dataset: Dataset to split.
n(int): The number of subsets.
seed (int): Seed the generator used for the permutation of indexes.
If an integer being convertible to 32 bit unsigned integers is
specified, it is guaranteed that each sample
in the given dataset always belongs to a specific subset.
If ``None``, the permutation is changed randomly.
Returns:
list: List of ``n`` :class:`SubDataset` objects.
Each subset contains ``len(dataset) // n`` examples randomly chosen
from the dataset without replacement.
"""
n_examples = len(dataset)
sub_size = n_examples // n
order = numpy.random.RandomState(seed).permutation(len(dataset))
return [SubDataset(dataset, sub_size * i, sub_size * (i + 1), order)
for i in six.moves.range(n)]
def get_cross_validation_datasets(dataset, n_folds=None, order=None, **kwargs):
"""Creates a set of training/test splits for cross validation.
This function generates ``n_folds`` splits of the given dataset. The first
part of each split corresponds to the training dataset, while the second
part to the test dataset. No pairs of test datasets share any examples, and
all test datasets together cover the whole base dataset. Each test dataset
contains almost same number of examples (the numbers may differ up to 1).
Args:
dataset: Dataset to split.
n_fold(int): *(deprecated)*
`n_fold` is now deprecated for consistency of naming choice.
Please use `n_folds` instead.
n_folds (int): Number of splits for cross validation.
order (sequence of ints): Order of indexes with which each split is
determined. If it is ``None``, then no permutation is used.
Returns:
list of tuples: List of dataset splits.
"""
if 'n_fold' in kwargs:
warnings.warn(
'Argument `n_fold` is deprecated. '
'Please use `n_folds` instead',
DeprecationWarning)
n_folds = kwargs['n_fold']
if order is None:
order = numpy.arange(len(dataset))
else:
order = numpy.array(order) # copy
whole_size = len(dataset)
borders = [whole_size * i // n_folds for i in six.moves.range(n_folds + 1)]
test_sizes = [borders[i + 1] - borders[i]
for i in six.moves.range(n_folds)]
splits = []
for test_size in reversed(test_sizes):
size = whole_size - test_size
splits.append(split_dataset(dataset, size, order))
new_order = numpy.empty_like(order)
new_order[:test_size] = order[-test_size:]
new_order[test_size:] = order[:-test_size]
order = new_order
return splits
def get_cross_validation_datasets_random(dataset, n_folds, seed=None,
**kwargs):
"""Creates a set of training/test splits for cross validation randomly.
This function acts almost same as :func:`get_cross_validation_dataset`,
except automatically generating random permutation.
Args:
dataset: Dataset to split.
n_fold (int): *(deprecated)*
`n_fold` is now deprecated for consistency of naming choice.
Please use `n_folds` instead.
n_folds (int): Number of splits for cross validation.
seed (int): Seed the generator used for the permutation of indexes.
If an integer beging convertible to 32 bit unsigned integers is
specified, it is guaranteed that each sample
in the given dataset always belongs to a specific subset.
If ``None``, the permutation is changed randomly.
Returns:
list of tuples: List of dataset splits.
"""
if 'n_fold' in kwargs:
warnings.warn(
'Argument `n_fold` is deprecated. '
'Please use `n_folds` instead',
DeprecationWarning)
n_folds = kwargs['n_fold']
order = numpy.random.RandomState(seed).permutation(len(dataset))
return get_cross_validation_datasets(dataset, n_folds, order)
| 10,676
| 38.988764
| 79
|
py
|
chainer
|
chainer-master/chainer/exporters/__init__.py
|
from chainer.exporters import caffe # NOQA
| 44
| 21.5
| 43
|
py
|
chainer
|
chainer-master/chainer/exporters/caffe.py
|
import collections
import heapq
import os
import numpy
import six
import chainer
from chainer import function
from chainer import function_node
from chainer.links.caffe.protobuf3 import caffe_pb2 as caffe_pb
from chainer import variable
_function_types = (function.Function, function_node.FunctionNode)
def _add_blob(layer, shape, data):
# The following part is ridiculously slow!!
# TODO(okuta): Replace with C++ extension call
blob = layer.blobs.add()
blob.shape.dim[:] = shape
blob.data[:] = data.flatten()
def _dump_graph(outputs):
fan_out = collections.defaultdict(int)
cand_funcs = []
def add_cand_to_check(cands):
for cand in cands:
x = cand.creator
if x is None:
continue
if x not in fan_out:
# `len(fan_out)` is in order to avoid comparing `x`
heapq.heappush(cand_funcs, (-x.rank, len(fan_out), x))
fan_out[x] += 1
add_cand_to_check(outputs)
while cand_funcs:
_, _, func = heapq.heappop(cand_funcs)
assert isinstance(func, _function_types)
add_cand_to_check(func.inputs)
ret = []
cand_funcs = []
seen_set = set()
def add_cand(cands):
cands = [cand.creator for cand in cands if cand.creator is not None]
for x in cands:
if x in seen_set:
continue
order = 1
if fan_out[x] == 1 and len(cands) == 1:
order = -len(seen_set)
# Negate since heapq is min-heap
# `len(seen_set)` is in order to avoid comparing `x`
heapq.heappush(cand_funcs, (order, -x.rank, -len(seen_set), x))
seen_set.add(x)
add_cand(outputs)
while cand_funcs:
_, _, _, func = heapq.heappop(cand_funcs)
ret.append(func)
add_cand(func.inputs)
return ret[::-1]
class _RetrieveAsCaffeModel(object):
debug = False
def __init__(self, prototxt, caffemodel=None):
self.caffemodel = caffemodel
self.prototxt = prototxt
# key:string, val:dict(key: func, val: index)
self.naming_map = collections.defaultdict(dict)
def _get_layer_name(self, layer):
"""Generate layer name like "Convolution2DFunction-10-2".
The first number means rank of the layer (depth from the top),
and the second number is for preventing duplication
(different layer objects can have same rank)
Args:
layer (~chainer.Function_node): Function object
Returns:
str: A string to be used for the ``name`` field of the graph
in the exported Caffe model.
"""
label = '{}-{}'.format(layer.label, layer.rank)
d = self.naming_map[label]
if layer not in d.keys():
d[layer] = len(d) + 1
return '{}-{}'.format(label, d[layer])
def _get_parent_name(self, parent_):
if parent_ is None:
return 'data'
return self._get_layer_name(parent_)
def _gen_layer_prototxt(self, layer_params, name='layer', depth=0,
indent=2):
if isinstance(layer_params, (dict, collections.OrderedDict)):
s = name + ' {\n'
indent_s = ' ' * ((depth + 1) * indent)
for key, val in layer_params.items():
s += indent_s + \
self._gen_layer_prototxt(val, name=key, depth=depth + 1)
s += ' ' * (depth * indent)
s += '}\n'
return s
elif isinstance(layer_params, bool):
return '{}: {}\n'.format(name, 'true' if layer_params else 'false')
elif isinstance(layer_params, six.integer_types + (float,)):
return '{}: {}\n'.format(name, layer_params)
elif isinstance(layer_params, str):
return '{}: "{}"\n'.format(name, layer_params)
elif isinstance(layer_params, list):
s = ''
indent_s = ' ' * depth * indent
for i, t in enumerate(layer_params):
if i != 0:
s += indent_s
s += self._gen_layer_prototxt(t, name=name, depth=depth + 1)
return s
else:
raise ValueError('Unsupported type: ' + str(type(layer_params)))
def dump_function_object(self, func, prototxt, net):
assert isinstance(func, _function_types)
layer_name = self._get_layer_name(func)
parent_layer_names = [self._get_parent_name(input_.creator)
for input_ in func.inputs]
params = collections.OrderedDict()
params['type'] = None
params['name'] = layer_name
params['bottom'] = parent_layer_names
params['top'] = [layer_name]
layer = None
if net is not None:
layer = net.layer.add()
if func.label == 'LinearFunction':
if len(func.inputs) == 2:
_, W = func.inputs
b = None
else:
_, W, b = func.inputs
n_out, n_in = W.shape
inner_product_param = {
'num_output': n_out,
'bias_term': b is not None,
}
params['type'] = 'InnerProduct'
params['inner_product_param'] = inner_product_param
params['bottom'] = params['bottom'][:1]
if net is not None:
for k, v in six.iteritems(inner_product_param):
setattr(layer.inner_product_param, k, v)
_add_blob(layer, list(W.shape), W.data)
if b is not None:
b.retain_data()
_add_blob(layer, list(b.shape), b.data)
elif func.label in ('Convolution2DFunction',
'Deconvolution2DFunction'):
if len(func.inputs) == 2:
_, W = func.inputs
b = None
else:
_, W, b = func.inputs
n_out, n_in, kw, kh = W.shape
convolution_param = {
'num_output': n_out,
'bias_term': b is not None,
'pad_w': func.pw,
'pad_h': func.ph,
'stride_w': func.sx,
'stride_h': func.sy,
'kernel_w': kw,
'kernel_h': kh,
'group': func.groups
}
params['bottom'] = params['bottom'][:1]
if func.label == 'Convolution2DFunction':
params['type'] = 'Convolution'
else:
params['type'] = 'Deconvolution'
convolution_param['num_output'] = n_in
params['convolution_param'] = convolution_param
if net is not None:
for k, v in six.iteritems(convolution_param):
setattr(layer.convolution_param, k, v)
_add_blob(layer, [n_out, n_in, kh, kw], W.data)
if b is not None:
b.retain_data()
_add_blob(layer, [n_out], b.data)
elif func.label == 'AveragePooling2D':
kw = func.kw
kh = func.kh
pooling_param = {
'pool': 1,
'pad_w': func.pw,
'pad_h': func.ph,
'stride_w': func.sx,
'stride_h': func.sy,
'kernel_w': kw,
'kernel_h': kh,
}
params['type'] = 'Pooling'
params['pooling_param'] = pooling_param
if net is not None:
for k, v in six.iteritems(pooling_param):
setattr(layer.pooling_param, k, v)
elif func.label == 'MaxPoolingND' and func.ndim == 2:
kh, kw = func.ksize
sy, sx = func.stride
ph, pw = func.pad
pooling_param = {
'pool': 0,
'pad_w': pw,
'pad_h': ph,
'stride_w': sx,
'stride_h': sy,
'kernel_w': kw,
'kernel_h': kh,
}
params['type'] = 'Pooling'
params['pooling_param'] = pooling_param
if net is not None:
for k, v in six.iteritems(pooling_param):
setattr(layer.pooling_param, k, v)
elif func.label == 'LocalResponseNormalization':
lrn_param = {
'norm_region': 0, # ACROSS_CHANNELS
'local_size': func.n,
'k': func.k,
'alpha': func.alpha * func.n,
'beta': func.beta,
}
params['type'] = 'LRN'
params['lrn_param'] = lrn_param
if net is not None:
for k, v in six.iteritems(lrn_param):
setattr(layer.lrn_param, k, v)
elif func.label == 'FixedBatchNormalization':
_, gamma, beta, mean, var = func.inputs
batch_norm_param = {'use_global_stats': True, 'eps': func.eps}
params['type'] = 'BatchNorm'
params['bottom'] = params['bottom'][:1]
params['batch_norm_param'] = batch_norm_param
if net is not None:
for k, v in six.iteritems(batch_norm_param):
setattr(layer.batch_norm_param, k, v)
_add_blob(layer, [mean.data.size], mean.data)
_add_blob(layer, [var.data.size], var.data)
_add_blob(layer, [1], numpy.ones((1,), dtype=numpy.float32))
if gamma.data is None and beta.data is None:
pass
else:
bn_name = layer_name + '_bn'
params['name'] = bn_name
params['top'] = [bn_name]
if prototxt is not None:
prototxt.write(self._gen_layer_prototxt(params))
if net is not None:
layer.name = params['name']
layer.type = params['type']
layer.bottom[:] = params['bottom']
layer.top[:] = params['top']
layer.phase = caffe_pb.TEST
del params, layer
params = collections.OrderedDict()
params['type'] = 'Scale'
params['name'] = layer_name
params['bottom'] = [bn_name]
params['top'] = [layer_name]
if net is not None:
layer = net.layer.add()
beta.retain_data()
bias_term = beta.data is not None
scale_param = {
'axis': 1,
'bias_term': bias_term,
}
params['scale_param'] = scale_param
if net is not None:
for k, v in six.iteritems(scale_param):
setattr(layer.scale_param, k, v)
_add_blob(layer, [gamma.data.size], gamma.data)
if bias_term:
_add_blob(layer, [beta.data.size], beta.data)
elif func.label == 'ReLU':
params['type'] = 'ReLU'
elif func.label == 'LeakyReLU':
relu_param = {'negative_slope': func.slope}
params['type'] = 'ReLU'
params['relu_param'] = relu_param
if net is not None:
for k, v in six.iteritems(relu_param):
setattr(layer.relu_param, k, v)
elif func.label == 'Concat':
axis = func.axis
concat_param = {'axis': axis}
params['type'] = 'Concat'
params['concat_param'] = concat_param
if net is not None:
for k, v in six.iteritems(concat_param):
setattr(layer.concat_param, k, v)
elif func.label == 'Softmax':
params['type'] = 'Softmax'
elif func.label == 'Sigmoid':
params['type'] = 'Sigmoid'
elif func.label == 'Reshape':
input_ = func.inputs[0]
parent = input_.creator
parent_layer_name = parent_layer_names[0]
if 'Reshape' in parent_layer_name:
grandparent = parent.inputs[0].creator
parent_layer_name = self._get_parent_name(grandparent)
reshape_param = {'shape': {'dim': list(func.shape)}}
params['type'] = 'Reshape'
params['bottom'] = [parent_layer_name]
params['reshape_param'] = reshape_param
if layer is not None:
dim = reshape_param['shape']['dim']
layer.reshape_param.shape.dim[:] = dim
elif func.label == '_ + _':
params['type'] = 'Eltwise'
else:
raise Exception(
'Cannot convert, name={}, rank={}, label={}, inputs={}'.format(
layer_name, func.rank, func.label, parent_layer_names))
if prototxt is not None:
prototxt.write(self._gen_layer_prototxt(params))
if net is not None:
layer.name = params['name']
layer.type = params['type']
layer.bottom[:] = params['bottom']
layer.top[:] = params['top']
layer.phase = caffe_pb.TEST
def __call__(self, name, inputs, outputs):
dumped_list = _dump_graph(outputs)
f = None
net = None
if self.caffemodel is not None:
net = caffe_pb.NetParameter()
try:
if self.prototxt is not None:
f = open(self.prototxt, 'wt')
f.write('name: "{}"\n'.format(name))
assert len(inputs) == 1
f.write('layer {\n'
' name: "data"\n'
' type: "Input"\n'
' top: "data"\n'
' input_param { shape: {')
for i in inputs[0].shape:
f.write(' dim: ' + str(i))
f.write(' } }\n'
'}\n')
for i in dumped_list:
self.dump_function_object(i, f, net)
finally:
if f is not None:
f.close()
if net is not None:
with open(self.caffemodel, 'wb') as f:
f.write(net.SerializeToString())
if self.debug:
import google.protobuf.text_format
with open(self.caffemodel + '.txt', 'w') as f:
f.write(google.protobuf.text_format.MessageToString(net))
def export(model, args, directory=None,
export_params=True, graph_name='Graph'):
"""(Experimental) Export a computational graph as Caffe format.
Args:
model (~chainer.Chain): The model object you want to export in Caffe
format. It should have :meth:`__call__` method because the second
argument ``args`` is directly given to the model by the ``()``
accessor.
args (list of ~chainer.Variable): The arguments which are given to the
model directly.
directory (str): The directory used for saving the resulting Caffe
model. If None, nothing is saved to the disk.
export_params (bool): If True, this function exports all the parameters
included in the given model at the same time. If False, the
exported Caffe model doesn't include any parameter values.
graph_name (str): A string to be used for the ``name`` field of the
graph in the exported Caffe model.
.. note::
Currently, this function supports networks that created by following
layer functions.
- :func:`~chainer.functions.linear`
- :func:`~chainer.functions.convolution_2d`
- :func:`~chainer.functions.deconvolution_2d`
- :func:`~chainer.functions.max_pooling_2d`
- :func:`~chainer.functions.average_pooling_2d`
- :func:`~chainer.functions.batch_normalization`
- :func:`~chainer.functions.local_response_normalization`
- :func:`~chainer.functions.relu`
- :func:`~chainer.functions.leaky_relu`
- :func:`~chainer.functions.concat`
- :func:`~chainer.functions.softmax`
- :func:`~chainer.functions.reshape`
- :func:`~chainer.functions.add`
This function can export at least following networks.
- GoogLeNet
- ResNet
- VGG
And, this function use testing (evaluation) mode.
.. admonition:: Example
>>> from chainer.exporters import caffe
>>>
>>> class Model(chainer.Chain):
... def __init__(self):
... super(Model, self).__init__()
... with self.init_scope():
... self.l1 = L.Convolution2D(None, 1, 1, 1, 0)
... self.b2 = L.BatchNormalization(1)
... self.l3 = L.Linear(None, 1)
...
... def __call__(self, x):
... h = F.relu(self.l1(x))
... h = self.b2(h)
... return self.l3(h)
...
>>> x = chainer.Variable(np.zeros((1, 10, 10, 10), np.float32))
>>> caffe.export(Model(), [x], None, True, 'test')
"""
assert isinstance(args, (tuple, list))
if len(args) != 1:
raise NotImplementedError()
for i in args:
assert isinstance(i, variable.Variable)
with function.force_backprop_mode(), chainer.using_config('train', False):
output = model(*args)
if isinstance(output, variable.Variable):
output = [output]
assert isinstance(output, (tuple, list))
for i in output:
assert isinstance(i, variable.Variable)
prototxt = None
caffemodel = None
if directory is not None:
prototxt = os.path.join(directory, 'chainer_model.prototxt')
if export_params:
caffemodel = os.path.join(directory, 'chainer_model.caffemodel')
retriever = _RetrieveAsCaffeModel(prototxt, caffemodel)
retriever(graph_name, args, output)
| 18,050
| 35.763747
| 79
|
py
|
chainer
|
chainer-master/chainer/optimizer_hooks/gradient_noise.py
|
import numpy
import chainer
from chainer import cuda
def exponential_decay_noise(xp, shape, dtype, hook, opt):
"""Time-dependent annealed Gaussian noise function from the paper:
`Adding Gradient Noise Improves Learning for Very Deep Networks
<https://arxiv.org/pdf/1511.06807>`_.
"""
std = numpy.sqrt(hook.eta / numpy.power(1 + opt.t, 0.55))
return xp.random.normal(0, std, shape).astype(dtype)
class GradientNoise(object):
"""Optimizer/UpdateRule hook function for adding gradient noise.
This hook function simply adds noise generated by the ``noise_func``
to the gradient. By default it adds time-dependent annealed Gaussian
noise to the gradient at every training step:
.. math::
g_t \\leftarrow g_t + N(0, \\sigma_t^2)
where
.. math::
\\sigma_t^2 = \\frac{\\eta}{(1+t)^\\gamma}
with :math:`\\eta` selected from {0.01, 0.3, 1.0} and
:math:`\\gamma = 0.55`.
Args:
eta (float): Parameter that defines the scale of the noise. For
the default noise function, it is recommended that it be either
0.01, 0.3 or 1.0.
noise_func (function): Noise generating function which by default
is given by `Adding Gradient Noise Improves Learning for Very Deep
Networks <https://arxiv.org/pdf/1511.06807>`_.
Attributes:
~optimizer_hooks.GradientNoise.timing (string): Specifies
when this hook should be called by the
Optimizer/UpdateRule. Valid values are
'pre' (before any updates) and 'post' (after any
updates).
~optimizer_hooks.GradientNoise.call_for_each_param (bool): Specifies
if this hook is called for each parameter (``True``)
or only once (``False``) by an optimizer to
which this hook is registered. This function does
not expect users to switch the value from default one,
which is `True`.
.. versionadded:: 4.0.0
The *timing* parameter.
"""
name = 'GradientNoise'
call_for_each_param = True
timing = 'pre'
def __init__(self, eta, noise_func=exponential_decay_noise):
self.eta = eta
self.noise_func = noise_func
def __call__(self, rule, param):
g = param.grad
if g is None:
return
with chainer.using_device(param.device):
xp = param.device.xp
noise = self.noise_func(xp, g.shape, g.dtype, self, rule)
if xp is cuda.cupy:
kernel = cuda.elementwise(
'T noise', 'T g', 'g += noise', 'gradient_noise')
kernel(noise, g)
else:
g += noise
| 2,840
| 33.228916
| 79
|
py
|
chainer
|
chainer-master/chainer/optimizer_hooks/weight_decay.py
|
import chainer
from chainer import cuda
class WeightDecay(object):
"""Optimizer/UpdateRule hook function for weight decay regularization.
This hook function adds a scaled parameter to the corresponding gradient.
It can be used as a regularization.
Args:
rate (float): Coefficient for the weight decay.
Attributes:
~optimizer_hooks.WeightDecay.rate (float): Coefficient
for the weight decay.
~optimizer_hooks.WeightDecay.timing (string): Specifies
when this hook should be called by the
Optimizer/UpdateRule. Valid values are 'pre'
(before any updates) and 'post' (after any updates).
~optimizer_hooks.WeightDecay.call_for_each_param (bool): Specifies
if this hook is called for each parameter (``True``)
or only once (``False``) by an optimizer to
which this hook is registered. This function does
not expect users to switch the value from default one,
which is `True`.
.. versionadded:: 4.0.0
The *timing* parameter.
"""
name = 'WeightDecay'
call_for_each_param = True
timing = 'pre'
def __init__(self, rate):
self.rate = rate
def __call__(self, rule, param):
p, g = param.data, param.grad
if p is None or g is None:
return
with chainer.using_device(param.device):
rate = self.rate
if param._loss_scale is not None:
rate *= param._loss_scale
if param.device.xp is cuda.cupy:
kernel = cuda.elementwise(
'T p, T decay', 'T g', 'g += decay * p', 'weight_decay')
kernel(p, rate, g)
else:
g += rate * p
| 1,897
| 34.148148
| 79
|
py
|
chainer
|
chainer-master/chainer/optimizer_hooks/lasso.py
|
import chainer
from chainer import cuda
class Lasso(object):
"""Optimizer/UpdateRule hook function for Lasso regularization.
This hook function adds a scaled parameter to the sign of each weight.
It can be used as a regularization.
Args:
rate (float): Coefficient for the weight decay.
Attributes:
~optimizer_hooks.Lasso.rate (float): Coefficient for the weight decay.
~optimizer_hooks.Lasso.timing (string): Specifies
when this hook should be called by
the Optimizer/UpdateRule. Valid values are 'pre'
(before any updates) and 'post' (after any updates).
~optimizer_hooks.Lasso.call_for_each_param (bool): Specifies
if this hook is called for each parameter (``True``)
or only once (``False``) by an optimizer to
which this hook is registered. This function does
not expect users to switch the value from default one,
which is `True`.
.. versionadded:: 4.0.0
The *timing* parameter.
"""
name = 'Lasso'
call_for_each_param = True
timing = 'pre'
def __init__(self, rate):
self.rate = rate
def __call__(self, rule, param):
p, g = param.data, param.grad
if p is None or g is None:
return
with chainer.using_device(param.device):
xp = param.device.xp
sign = xp.sign(p)
if xp is cuda.cupy:
kernel = cuda.elementwise(
'T s, T decay', 'T g', 'g += decay * s', 'lasso')
kernel(sign, self.rate, g)
else:
g += self.rate * sign
| 1,773
| 33.784314
| 79
|
py
|
chainer
|
chainer-master/chainer/optimizer_hooks/__init__.py
|
from chainer.optimizer_hooks.gradient_clipping import GradientClipping # NOQA
from chainer.optimizer_hooks.gradient_hard_clipping import GradientHardClipping # NOQA
from chainer.optimizer_hooks.gradient_lars import GradientLARS # NOQA
from chainer.optimizer_hooks.gradient_noise import GradientNoise # NOQA
from chainer.optimizer_hooks.lasso import Lasso # NOQA
from chainer.optimizer_hooks.weight_decay import WeightDecay # NOQA
| 436
| 61.428571
| 87
|
py
|
chainer
|
chainer-master/chainer/optimizer_hooks/gradient_lars.py
|
import chainer
from chainer import backend
class GradientLARS(object):
"""Optimizer/UpdateRule hook function for layer wise adaptive rate scaling.
See: `Large Batch Training of Convolutional Networks
<https://arxiv.org/abs/1708.03888>`_.
See: `Convergence Analysis of Gradient Descent Algorithms
with Proportional Updates
<https://arxiv.org/abs/1801.03137>`_.
This hook function scales all gradient arrays to fit to the weight norm.
In <https://arxiv.org/abs/1708.03888>,
.. math::
v_{t+1} &= m * v_t + \\gamma * \\lambda *
(\\nabla L(w_t) + \\beta w_t), \\\\
w_{t+1} &= w_{t} - v_{t+1},
where
- :math:`\\gamma` : learning_rate
- :math:`m` : momentum
- :math:`\\beta` : weight_decay
- :math:`\\eta` : lars_coeeficient
- :math:`\\lambda`: local_lr \
:math:`=\\eta * \
\\frac{\\|w_t\\|}{\\|\\nabla L(w_t)\\| + \\beta * \\|w_t\\|}`.
As :math:`lr` in chainer.optimizers.SGD or chainer.optimizers.MomentumSGD
corresponds to :math:`\\gamma * \\eta`, we define :math:`clip\\_rate` as
:math:`\\frac{\\|w_t\\|}{\\|\\nabla L(w_t)\\| + \\beta * \\|w_t\\|}`
and reformulate the aforementioned formula as:
:math:`v_{t+1} \
= m * v_t + lr * clip\\_rate * (\\nabla L(w_t) + \\beta w_t)`
and implement in this way. So you do not set lars_coeeficient.
Args:
threashold (float): If weight norm is more than threshold,
this function scales all gradient arrays to fit weight norm.
(See <https://arxiv.org/abs/1801.03137>)
weight_decay (float): Coefficient for the weight decay.
eps (float): Small value for the numerical stability.
(See <https://arxiv.org/abs/1801.03137>)
Attributes:
~optimizer_hooks.GradientLARS.threashold (float): If weight norm is
more than threshold, this function scales all
gradient arrays to fit weight norm.
(See <https://arxiv.org/abs/1801.03137>)
~optimizer_hooks.GradientLARS.weight_decay (float): Coefficient
for the weight decay.
~optimizer_hooks.GradientLARS.eps (float): Small value for the
numerical stability.
(See <https://arxiv.org/abs/1801.03137>)
~optimizer_hooks.GradientLARS.timing (string): Specifies
when this hook should be called by the
Optimizer/UpdateRule. Valid values are 'pre'
(before any updates) and 'post' (after any updates).
~optimizer_hooks.GradientLARS.call_for_each_param (bool): Specifies
if this hook is called for each parameter (``True``)
or only once (``False``) by an optimizer to
which this hook is registered. This function does
not expect users to switch the value from default one,
which is `True`.
"""
name = 'GradientLARS'
call_for_each_param = True
timing = 'pre'
def __init__(self, threshold=1e-2, weight_decay=0.0, eps=1e-9):
self.threshold = threshold
self.weight_decay = weight_decay
self.eps = eps
def __call__(self, rule, param):
p, g = param.data, param.grad
if p is None or g is None:
return
with chainer.using_device(param.device):
xp = param.device.xp
if xp is backend.chainerx:
# TODO(ecastill): norm in chainerx
p_norm = xp.sqrt(xp.sum(p*p))
g_norm = xp.sqrt(xp.sum(g*g))
else:
# weight norm
p_norm = xp.linalg.norm(p)
# grad norm
g_norm = xp.linalg.norm(g)
local_rate = (p_norm
/ (self.eps + g_norm + self.weight_decay * p_norm))
rate = xp.where(p_norm > self.threshold, local_rate, 1.0)
if xp is backend.cuda:
kernel = backend.cuda.elementwise(
'T p, T rate, T weight_decay',
'T g',
'g += weight_decay * p; g *= rate;',
'lars')
kernel(p, rate, self.weight_decay, g)
else:
g += self.weight_decay * p
g *= rate
| 4,461
| 38.839286
| 79
|
py
|
chainer
|
chainer-master/chainer/optimizer_hooks/gradient_hard_clipping.py
|
import chainer
from chainer import backend
class GradientHardClipping(object):
"""Optimizer/UpdateRule hook function for gradient clipping.
This hook function clips all gradient arrays to be within a lower and upper
bound.
Args:
lower_bound (float): The lower bound of the gradient value.
upper_bound (float): The upper bound of the gradient value.
Attributes:
~optimizer_hooks.GradientHardClipping.lower_bound (float): The
lower bound of the gradient value.
~optimizer_hooks.GradientHardClipping.upper_bound (float): The
upper bound of the gradient value.
~optimizer_hooks.GradientHardClipping.timing (string): Specifies
when this hook should be called by the
Optimizer/UpdateRule. Valid values are 'pre'
(before any updates) and 'post'
(after any updates).
~optimizer_hooks.GradientHardClipping.call_for_each_param (bool): \
Specifies if this hook is called for each parameter
(``True``) or only once (``False``) by an optimizer to
which this hook is registered. This function does
not expect users to switch the value from default one,
which is `True`.
.. versionadded:: 4.0.0
The *timing* parameter.
"""
name = 'GradientHardClipping'
call_for_each_param = True
timing = 'pre'
def __init__(self, lower_bound, upper_bound):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def __call__(self, rule, param):
grad = param.grad
if grad is None:
return
with chainer.using_device(param.device):
xp = param.device.xp
# TODO(kshitij12345): Fix when chainerx.clip
# supports kwarg `out`.
if xp == backend.chainerx \
or isinstance(param.grad, backend.intel64.mdarray):
grad[...] = grad.clip(self.lower_bound, self.upper_bound)
else:
# Save on new object allocation when using numpy and cupy
# using kwarg `out`
xp.clip(grad, self.lower_bound, self.upper_bound, out=grad)
| 2,360
| 38.35
| 79
|
py
|
chainer
|
chainer-master/chainer/optimizer_hooks/gradient_clipping.py
|
import collections
import numpy
import six
import chainer
from chainer import backend
def _sum_sqnorm_grads(params):
# Calculates sum of squares of gradients.
# Returns a tuple of the sum and the device of the sum.
# The device will be `None` in multi-device case.
# If the inputs are on a single device, the sum is returned
# as an ndarray on the device, so that no synchronization is taken place.
# If there are multiple devices, accumulation is done on each device
# first, and the total sum is returned as a python float.
# TODO(niboshi): Support and test len(params) == 0
params_grouped = collections.defaultdict(list)
devices_map = {}
# Group params by devices.
for param in params:
device = param.device
params_grouped[device.name].append(param)
devices_map[device.name] = device
# Calculates partial sums for each device.
sq_sums = []
for device_name, paramlist in six.iteritems(params_grouped):
device = devices_map[device_name]
with chainer.using_device(device):
dots = []
for param in paramlist:
g = param.grad
g = g.ravel()
dots.append(g.dot(g))
sq_sums.append(sum(dots))
# Return the total sum.
if len(sq_sums) == 1:
# single device
sqnorm = sq_sums[0]
ret_device = params[0].device
else:
# multi-device
sqnorm = sum([float(s) for s in sq_sums])
ret_device = None
return sqnorm, ret_device
class GradientClipping(object):
"""Optimizer hook function for gradient clipping.
This hook function scales all gradient arrays to fit to the defined L2 norm
threshold.
Args:
threshold (float): L2 norm threshold.
Attributes:
~optimizer_hooks.GradientClipping.threshold (float): L2
norm threshold of gradient norm.
~optimizer_hooks.GradientClipping.timing (string): Specifies
when this hook should be
called by the Optimizer/UpdateRule. Valid values are
'pre' (before any updates) and 'post' (after any
updates).
.. versionadded:: 4.0.0
The *timing* parameter.
"""
name = 'GradientClipping'
timing = 'pre'
def __init__(self, threshold):
self.threshold = threshold
def __call__(self, opt):
sqnorm, device = _sum_sqnorm_grads(list(opt.target.params(False)))
if device is None:
# Assign a dummy device for using_device.
device = backend.CpuDevice()
with chainer.using_device(device):
norm = device.xp.sqrt(sqnorm)
# TODO(niboshi): Could be inf if norm == 0
rate = self.threshold / norm
# In NumPy backend, `rate` is already available on CPU and thus
# can be compared against 1 without extra overhead.
# Otherwise `clip` is used to avoid synchronization.
if device.xp is numpy:
if rate >= 1:
return
else:
rate = rate.clip(None, 1)
for param in opt.target.params(False):
grad = param.grad
with chainer.using_device(param.device):
grad *= rate
| 3,363
| 30.439252
| 79
|
py
|
chainer
|
chainer-master/chainer/iterators/dali_iterator.py
|
from __future__ import division
from chainer.dataset import iterator
from chainer import utils
class DaliIterator(iterator.Iterator):
"""(Experimental) Iterator for DALI pipeline.
Args:
pipeline: DALI pipeline.
repeat (bool): If ``True``, it infinitely loops over the dataset.
Otherwise, it stops iteration at the end of the first epoch.
"""
def __init__(self, pipeline, repeat=True):
utils.experimental('DaliIterator')
self.pipeline = pipeline
self._repeat = repeat
self._is_build = False
self.epoch_size = 1 # dummy
self.reset()
def __next__(self):
if not self._is_build:
self.pipeline.build()
self._is_build = True
self.epoch_size = tuple(self.pipeline.epoch_size().values())[0]
if not self._repeat and self.epoch > 0:
raise StopIteration
self._previous_epoch_detail = self.epoch_detail
i = self.current_position
i_end = i + self.batch_size
N = self.epoch_size
if i_end >= N:
if self._repeat:
self.current_position = i_end - N
else:
self.current_position = 0
self.epoch += 1
self.is_new_epoch = True
else:
self.current_position = i_end
self.is_new_epoch = False
return self.pipeline.run()
next = __next__
@property
def batch_size(self):
return self.pipeline.batch_size
@property
def epoch_detail(self):
return self.epoch + self.current_position / self.epoch_size
@property
def previous_epoch_detail(self):
if self._previous_epoch_detail < 0:
return None
return self._previous_epoch_detail
def serialize(self, serializer):
self.current_position = serializer('current_position',
self.current_position)
self.epoch = serializer('epoch', self.epoch)
self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
try:
self._previous_epoch_detail = serializer(
'previous_epoch_detail', self._previous_epoch_detail)
except KeyError:
# guess previous_epoch_detail for older version
self._previous_epoch_detail = self.epoch + \
(self.current_position - self.batch_size) / self.epoch_size
if self.epoch_detail > 0:
self._previous_epoch_detail = max(
self._previous_epoch_detail, 0.)
else:
self._previous_epoch_detail = -1.
def reset(self):
self.current_position = 0
self.epoch = 0
self.is_new_epoch = False
# use -1 instead of None internally.
self._previous_epoch_detail = -1.
@property
def repeat(self):
return self._repeat
| 2,926
| 28.867347
| 75
|
py
|
chainer
|
chainer-master/chainer/iterators/_statemachine.py
|
import collections
import numpy
IteratorState = collections.namedtuple('IteratorState', (
'current_position', 'epoch', 'is_new_epoch', 'order'))
def iterator_statemachine(state, batch_size, repeat, order_sampler,
dataset_len):
i, epoch, _, order = state
if not repeat and epoch > 0:
return state, None
indices_list = []
n = dataset_len if order is None else len(order)
if repeat and n == 0:
raise ValueError('Epoch size must be positive for an iterator '
'that repeats.')
i_end = i + batch_size
is_new_epoch = False
while i_end >= n:
if order is None:
indices_list.append(numpy.arange(i, n, dtype=numpy.intp))
else:
indices_list.append(order[i:n])
if order is not None:
new_order = order_sampler(order, i)
if len(new_order) != len(order):
raise ValueError('The size of order does not match '
'the size of the previous order.')
order = new_order
epoch += 1
is_new_epoch = True
i = 0
if repeat:
i_end -= n
else:
i_end = 0
break # explicit break in case n == 0
if order is None:
indices_list.append(numpy.arange(i, i_end, dtype=numpy.intp))
else:
indices_list.append(order[i:i_end])
state = IteratorState(i_end, epoch, is_new_epoch, order)
indices = numpy.concatenate(indices_list)
return state, indices
| 1,564
| 26.45614
| 71
|
py
|
chainer
|
chainer-master/chainer/iterators/multithread_iterator.py
|
from __future__ import division
from multiprocessing import pool
import numpy
from chainer.dataset import iterator
from chainer.iterators import _statemachine
from chainer.iterators.order_samplers import ShuffleOrderSampler
class MultithreadIterator(iterator.Iterator):
"""Dataset iterator that loads examples in parallel.
This is an implementation of :class:`~chainer.dataset.Iterator` that loads
examples with worker threads. It uses the standard :mod:`threading`
module to parallelize the loading.
Note that this iterator effectively prefetches the examples for the next
batch asynchronously after the current batch is returned.
This iterator saves ``-1`` instead of ``None`` in snapshots since some
serializers do not support ``None``.
Args:
dataset (~chainer.dataset.Dataset): Dataset to iterate.
batch_size (int): Number of examples within each batch.
repeat (bool): If ``True``, it infinitely loops over the dataset.
Otherwise, it stops iteration at the end of the first epoch.
shuffle (bool): If ``True``, the order of examples is shuffled at the
beginning of each epoch. Otherwise, examples are extracted in the
order of indexes. If ``None`` and no ``order_sampler`` is given,
the behavior is the same as the case with ``shuffle=True``.
n_threads (int): Number of worker threads.
order_sampler (callable): A callable that generates the order
of the indices to sample in the next epoch when a epoch finishes.
This function should take two arguments: the current order
and the current position of the iterator.
This should return the next order. The size of the order
should remain constant.
This option cannot be used when ``shuffle`` is not ``None``.
"""
def __init__(self, dataset, batch_size, repeat=True, shuffle=None,
n_threads=1, order_sampler=None):
self.dataset = dataset
self.batch_size = batch_size
self._repeat = repeat
self._shuffle = shuffle
if self._shuffle is not None:
if order_sampler is not None:
raise ValueError('`shuffle` is not `None` and a custom '
'`order_sampler` is set. Please set '
'`shuffle` to `None` to use the custom '
'order sampler.')
else:
if self._shuffle:
order_sampler = ShuffleOrderSampler()
else:
if order_sampler is None:
order_sampler = ShuffleOrderSampler()
self.order_sampler = order_sampler
self.n_threads = n_threads
self._pool = None
self.reset()
def reset(self):
if self.order_sampler is None:
order = None
else:
order = self.order_sampler(numpy.arange(len(self.dataset)), 0)
self._state = _statemachine.IteratorState(0, 0, False, order)
self._previous_epoch_detail = -1.
# reset internal state
self._next = None
def finalize(self):
pool = self._pool
self._next = None
self._pool = None
if pool is not None:
pool.terminate()
def __next__(self):
if self._next is None:
# load for the first iteration
self._invoke_prefetch()
batch = self._get()
self._invoke_prefetch() # prefetch for the next iteration
return batch
next = __next__
@property
def current_position(self):
return self._state.current_position
@property
def epoch(self):
return self._state.epoch
@property
def is_new_epoch(self):
return self._state.is_new_epoch
@property
def epoch_detail(self):
return self.epoch + self.current_position / self._epoch_size
@property
def previous_epoch_detail(self):
# use -1 instead of None internally.
if self._previous_epoch_detail < 0:
return None
return self._previous_epoch_detail
def serialize(self, serializer):
current_position = serializer(
'current_position', self.current_position)
epoch = serializer('epoch', self.epoch)
is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
order = serializer('_order', self._state.order)
self._state = _statemachine.IteratorState(
current_position, epoch, is_new_epoch, order)
self._previous_epoch_detail = serializer(
'previous_epoch_detail', self._previous_epoch_detail)
# Old version serialized ``None``.
if self._previous_epoch_detail is None:
self._previous_epoch_detail = -1.
self._next = None
@staticmethod
def _read(args):
dataset, index = args
return dataset[index]
def _invoke_prefetch(self):
assert self._next is None
self._next_state, indices = _statemachine.iterator_statemachine(
self._state, self.batch_size, self.repeat, self.order_sampler,
len(self.dataset))
if indices is None:
self._next = None
else:
if self._pool is None:
self._pool = pool.ThreadPool(self.n_threads)
args = [(self.dataset, index) for index in indices]
self._next = self._pool.map_async(MultithreadIterator._read, args)
def _get(self):
self._previous_epoch_detail = self.epoch_detail
self._state = self._next_state
next = self._next
if next is None:
raise StopIteration
self._next = None
while not next.ready():
next.wait(0.5) # To avoid interruption bug in Python2
batch = [data for data in next.get()]
return batch
@property
def _epoch_size(self):
order = self._state.order
if order is None:
epoch_size = len(self.dataset)
else:
epoch_size = len(order)
return epoch_size
@property
def repeat(self):
return self._repeat
| 6,202
| 32.52973
| 78
|
py
|
chainer
|
chainer-master/chainer/iterators/multiprocess_iterator.py
|
from __future__ import division
import datetime
import multiprocessing
from multiprocessing import sharedctypes # type: ignore
import signal
import sys
import threading
import warnings
import numpy
import six
from chainer.dataset import iterator
from chainer.iterators import _statemachine
from chainer.iterators.order_samplers import ShuffleOrderSampler
_response_time = 0.1
def _raise_timeout_warning():
warnings.warn(
'Stalled dataset is detected. '
'See the documentation of MultiprocessIterator for common causes and '
'workarounds:\n'
'https://docs.chainer.org/en/stable/reference/generated/'
'chainer.iterators.MultiprocessIterator.html',
MultiprocessIterator.TimeoutWarning)
class MultiprocessIterator(iterator.Iterator):
"""Dataset iterator that loads examples in parallel.
This is an implementation of :class:`~chainer.dataset.Iterator` that loads
examples with worker processes. It uses the standard :mod:`multiprocessing`
module to parallelize the loading. The dataset is sent to the worker
processes in the standard way using pickle.
Note that this iterator effectively prefetches the examples for the next
batch asynchronously after the current batch is returned.
This iterator saves ``-1`` instead of ``None`` in snapshots since some
serializers do not support ``None``.
.. note::
When you are using OpenCV somewhere in your code and the
``MultiprocessIterator`` is used in the training code, the
training loop may get stuck at some point. In such situation,
there are several workarounds to prevent the process got stuck.
1. Set the environment variable as follows: ``OMP_NUM_THREADS=1``
2. Add ``cv2.setNumThreads(0)`` right after ``import cv2`` in your
training script.
3. Use :class:`~chainer.iterators.MultithreadIterator` instead of
``MultiprocessIterator``.
Args:
dataset (~chainer.dataset.Dataset): Dataset to iterate.
batch_size (int): Number of examples within each batch.
repeat (bool): If ``True``, it infinitely loops over the dataset.
Otherwise, it stops iteration at the end of the first epoch.
shuffle (bool): If ``True``, the order of examples is shuffled at the
beginning of each epoch. Otherwise, examples are extracted in the
order of indexes. If ``None`` and no ``order_sampler`` is given,
the behavior is the same as the case with ``shuffle=True``.
n_processes (int): Number of worker processes. The number of CPUs is
used by default.
n_prefetch (int): Number of prefetch batches.
shared_mem (int): The size of using shared memory per data.
If ``None``, size is adjusted automatically.
dataset_timeout (float): :class:`MultiprocessIterator.TimeoutWarning`
will be issued after this time in seconds elapsed in each dataset
realization. ``None`` to disable the warning. You can turn this
warning into an error by using :func:`warnings.simplefilter`::
warnings.simplefilter(
'error',
chainer.iterators.MultiprocessIterator.TimeoutWarning)
order_sampler (callable): A callable that generates the order
of the indices to sample in the next epoch when a epoch finishes.
This function should take two arguments: the current order
and the current position of the iterator.
This should return the next order. The size of the order
should remain constant.
This option cannot be used when ``shuffle`` is not ``None``.
maxtasksperchild (int): Number of tasks a worker of prefetch process
can complete before it will exit and be replaced with a fresh
worker process, to enable unused resources to be freed. If
``None``, worker processes will live as long as the pool.
"""
class TimeoutWarning(RuntimeWarning):
pass
_interruption_testing = False # for testing
_finalized = False
_prefetch_loop = None
_comm = None
def __init__(self, dataset, batch_size, repeat=True, shuffle=None,
n_processes=None, n_prefetch=1, shared_mem=None,
order_sampler=None, dataset_timeout=30.0,
maxtasksperchild=None):
self.dataset = dataset
self.batch_size = batch_size
self.repeat = repeat
self.shuffle = shuffle
self.n_processes = n_processes or multiprocessing.cpu_count()
self.n_prefetch = max(n_prefetch, 1)
self.shared_mem = shared_mem
self.dataset_timeout = dataset_timeout
self._maxtasksperchild = maxtasksperchild
if self.shuffle is not None:
if order_sampler is not None:
raise ValueError('`shuffle` is not `None` and a custom '
'`order_sampler` is set. Please set '
'`shuffle` to `None` to use the custom '
'order sampler.')
else:
if self.shuffle:
order_sampler = ShuffleOrderSampler()
else:
if order_sampler is None:
order_sampler = ShuffleOrderSampler()
self.order_sampler = order_sampler
self._initialize_loop()
def _initialize_loop(self):
self._comm = _Communicator(self.n_prefetch, self.dataset_timeout)
self.reset()
self._prefetch_loop = _PrefetchLoop(
self.dataset, self.batch_size, self.repeat,
self.n_processes, self.n_prefetch, self.shared_mem,
self._comm, self.order_sampler,
self._interruption_testing, self._maxtasksperchild)
# defer launching prefetch thread until creating the worker pool,
# not to leave a background thread in forked processes.
def __next__(self):
measure_mode = False
if self._prefetch_loop.thread is None:
if self._prefetch_loop.measure_required():
measure_mode = True
batch, state = self._prefetch_loop.measure(
self.dataset_timeout)
self._prefetch_loop.launch_thread()
if not measure_mode:
batch, state = self._comm.get()
self._previous_epoch_detail = self.epoch_detail
self._state = state
if batch is None:
raise StopIteration
else:
return batch
next = __next__
def finalize(self):
if self._finalized:
return
if self._comm is not None:
self._comm.terminate()
if self._prefetch_loop is not None:
self._prefetch_loop.terminate()
self._comm = None
self._prefetch_loop = None
self._finalized = True
def __copy__(self):
# This function is implemented for backward compatibility.
# Please use `reset` normally.
other = MultiprocessIterator(
self.dataset, self.batch_size, self.repeat, shuffle=None,
n_processes=self.n_processes, n_prefetch=self.n_prefetch,
shared_mem=self.shared_mem, order_sampler=self.order_sampler)
other._reset_state(self.current_position, self.epoch,
self.is_new_epoch, self._state.order)
other._previous_epoch_detail = self._previous_epoch_detail
return other
@property
def current_position(self):
return self._state.current_position
@property
def epoch(self):
return self._state.epoch
@property
def is_new_epoch(self):
return self._state.is_new_epoch
@property
def epoch_detail(self):
return self.epoch + self.current_position / self._epoch_size
@property
def previous_epoch_detail(self):
if self._previous_epoch_detail < 0:
return None
return self._previous_epoch_detail
def serialize(self, serializer):
current_position = serializer('current_position',
self.current_position)
epoch = serializer('epoch', self.epoch)
is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
order = self._state.order.copy()
try:
serializer('order', order)
except KeyError:
serializer('_order', order)
self._reset_state(current_position, epoch, is_new_epoch, order)
try:
self._previous_epoch_detail = serializer(
'previous_epoch_detail', self._previous_epoch_detail)
except KeyError:
# guess previous_epoch_detail for older version
self._previous_epoch_detail = self.epoch + \
(self.current_position - self.batch_size) / self._epoch_size
if self.epoch_detail > 0:
self._previous_epoch_detail = max(
self._previous_epoch_detail, 0.)
else:
self._previous_epoch_detail = -1.
def reset(self):
if self.order_sampler is None:
order = None
else:
order = self.order_sampler(numpy.arange(len(self.dataset)), 0)
self._reset_state(0, 0, False, order)
self._previous_epoch_detail = -1.
def _reset_state(self, current_position, epoch, is_new_epoch, order):
if self._finalized:
raise NotImplementedError(
'Reset of finalized MultiProcessIterator is currently not '
'supported.')
self._state = _statemachine.IteratorState(
current_position, epoch, is_new_epoch, order)
self._comm.reset(self._state)
@property
def _epoch_size(self):
order = self._state.order
if order is None:
epoch_size = len(self.dataset)
else:
epoch_size = len(order)
return epoch_size
def __getstate__(self):
# We trick the serializer to fill a dict for us
# this allows us to use the same code for both
# chainer and pickle serializers
state = {}
self.serialize(lambda k, v: state.__setitem__(k, v))
self._reset_state(self.current_position, self.epoch,
self.is_new_epoch, state['order'])
# Unpickling resets the instance without calling __init__
# Chainer serializers dumps the state in an existing
# object hence we need to save the initial parameters too
init = self.__dict__.copy()
del init['_comm']
del init['_state']
del init['_prefetch_loop']
# TODO(ecastill): When pickling this object there is the risk to copy
# the entire dataset. If the dataset is entirely in memory
# it can be duplicated when spawning new processes.
state['init'] = init
return state
def __setstate__(self, state):
self.__dict__.update(state['init'])
self._initialize_loop()
# Iterator state is restored after initialization
self._reset_state(state['current_position'], state['epoch'],
state['is_new_epoch'], state['order'])
self._previous_epoch_detail = state['previous_epoch_detail']
class _Communicator(object):
STATUS_CONTINUE = 0
STATUS_RESET = 1
STATUS_TERMINATE = 2
def __init__(self, n_prefetch, dataset_timeout):
self.n_prefetch = n_prefetch
self.dataset_timeout = dataset_timeout
self._lock = threading.Lock()
self._not_empty_cond = threading.Condition(self._lock)
self._not_full_cond = threading.Condition(self._lock)
self._batch_queue = []
self._status = _Communicator.STATUS_CONTINUE
self._reset_count = 0
@property
def is_terminated(self):
with self._lock:
return self._status == _Communicator.STATUS_TERMINATE
# called from iterator
def get(self):
with self._lock:
start = datetime.datetime.now()
while not self._batch_queue:
self._not_empty_cond.wait(_response_time)
dt = datetime.datetime.now() - start
if (self.dataset_timeout is not None
and dt > datetime.timedelta(
seconds=self.dataset_timeout)):
_raise_timeout_warning()
batch, prefetch_state = self._batch_queue.pop(0)
self._not_full_cond.notify()
return batch, prefetch_state
# called from iterator
def reset(self, prefetch_state):
with self._lock:
self._status = _Communicator.STATUS_RESET
self._prefetch_state = prefetch_state
self._batch_queue = []
self._not_full_cond.notify()
self._reset_count += 1
# called from iterator
def terminate(self):
with self._lock:
self._status = _Communicator.STATUS_TERMINATE
self._batch_queue = []
self._not_full_cond.notify()
self._reset_count += 1
# called from thread
def check(self):
with self._lock:
status = self._status
self._status = _Communicator.STATUS_CONTINUE
prefetch_state = None
if status == _Communicator.STATUS_RESET:
prefetch_state = self._prefetch_state
return status, prefetch_state, self._reset_count
# called from thread
def put(self, batch, prefetch_state, reset_count):
with self._lock:
if len(self._batch_queue) == self.n_prefetch:
self._not_full_cond.wait()
if reset_count == self._reset_count:
self._batch_queue.append((batch, prefetch_state))
self._not_empty_cond.notify()
class _PrefetchLoop(object):
_thread = None
_pool = None
_terminating = False
def __init__(self, dataset, batch_size, repeat,
n_processes, n_prefetch, mem_size, comm,
order_sampler,
_interruption_testing, maxtasksperchild):
self.dataset = dataset
self.batch_size = batch_size
self.repeat = repeat
self.n_processes = n_processes
self.mem_size = mem_size
self._comm = comm
self.order_sampler = order_sampler
self.maxtasksperchild = maxtasksperchild
self._allocate_shared_memory()
self._interruption_testing = _interruption_testing
def terminate(self):
self._terminating = True
# Terminate the thread first because it depends on the pool.
if self._thread is not None:
while self._thread.is_alive():
self._thread.join(_response_time)
if self._pool is not None:
self._pool.terminate()
self._thread = None
self._pool = None
@property
def thread(self):
return self._thread
def measure_required(self):
return self.mem_size is None
def measure(self, dataset_timeout):
# dataset_timeout: timeout in seconds or None
status, prefetch_state, _ = self._comm.check()
if status == _Communicator.STATUS_RESET:
self.prefetch_state = prefetch_state
self.prefetch_state, indices = _statemachine.iterator_statemachine(
self.prefetch_state, self.batch_size, self.repeat,
self.order_sampler, len(self.dataset))
if indices is None: # stop iteration
batch = None
else:
batch_ret = [None]
def fetch_batch():
batch_ret[0] = [self.dataset[idx] for idx in indices]
if dataset_timeout is None:
# Timeout is not set: fetch synchronously
fetch_batch()
else:
# Timeout is set: fetch asynchronously and watch for timeout
thr = threading.Thread(target=fetch_batch)
thr.daemon = True
thr.start()
thr.join(dataset_timeout)
if thr.is_alive():
_raise_timeout_warning()
thr.join()
batch = batch_ret[0]
self.mem_size = max(map(_measure, batch))
self._allocate_shared_memory()
return batch, self.prefetch_state
def _allocate_shared_memory(self):
if self.measure_required():
self.mem_bulk = None
else:
self.mem_bulk = \
sharedctypes.RawArray('b', self.batch_size * self.mem_size)
def launch_thread(self):
self._pool = multiprocessing.Pool(
processes=self.n_processes,
initializer=_fetch_setup,
initargs=(self.dataset, self.mem_size, self.mem_bulk),
maxtasksperchild=self.maxtasksperchild)
if self._interruption_testing:
pids = self._pool.map(_report_pid, range(self.n_processes))
print(' '.join(map(str, pids)))
sys.stdout.flush()
thread = threading.Thread(target=self._run, name='prefetch_loop')
thread.setDaemon(True)
thread.start()
self._thread = thread
return thread
def _run(self):
# The entry routine of the prefetch thread.
alive = True
try:
while alive:
if self._terminating:
break
alive = self._task()
finally:
self._pool.close()
self._pool.join()
def _task(self):
# Do a single task in the prefetch thread.
# Returns a bool indicating whether the loop should continue running.
status, prefetch_state, reset_count = self._comm.check()
if status == _Communicator.STATUS_RESET:
self.prefetch_state = prefetch_state
elif status == _Communicator.STATUS_TERMINATE:
return False # stop loop
self.prefetch_state, indices = _statemachine.iterator_statemachine(
self.prefetch_state, self.batch_size, self.repeat,
self.order_sampler, len(self.dataset))
if indices is None: # stop iteration
batch = None
else:
future = self._pool.map_async(_fetch_run, enumerate(indices))
while True:
try:
data_all = future.get(_response_time)
except multiprocessing.TimeoutError:
if self._comm.is_terminated:
return False
else:
break
batch = [_unpack(data, self.mem_bulk) for data in data_all]
self._comm.put(batch, self.prefetch_state, reset_count)
return True
# Using `parameterized` function (e.g. bound method) with Pool is tricky due to
# restrictions imposed by Pickle. Picklable types differ across versions.
# Just using top-level function with globals seems to be safest.
# it doesn't mean thread safety broken or global variables visible;
# notice that each process uses different address space.
# To make static linter happy, we first initialize global variables.
_fetch_dataset = None
_fetch_mem_size = None
_fetch_mem_bulk = None
def _fetch_setup(dataset, mem_size, mem_bulk):
global _fetch_dataset, _fetch_mem_size, _fetch_mem_bulk
signal.signal(signal.SIGINT, signal.SIG_IGN)
_fetch_dataset = dataset
_fetch_mem_size = mem_size
_fetch_mem_bulk = mem_bulk
def _fetch_run(inputs):
i, index = inputs
data = _fetch_dataset[index]
if _fetch_mem_bulk is not None:
offset = i * _fetch_mem_size
limit = offset + _fetch_mem_size
data = _pack(data, _fetch_mem_bulk, offset, limit)
return data
def _report_pid(_): # for testing
return multiprocessing.current_process().pid
class _PackedNdarray(object):
def __init__(self, array, mem, offset):
self.shape = array.shape
self.dtype = array.dtype
self.nbytes = array.nbytes
self.size = array.size
self.offset = offset
total = self.offset + self.nbytes
if total > len(mem):
raise ValueError(
'Shared memory size is too small. expect:{}, actual:{}'.format(
total, len(mem)))
target = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
target[...] = array.ravel()
def unpack(self, mem):
ret = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
ret = ret.reshape(self.shape).copy()
return ret
def _measure(data):
expect = 0
t = type(data)
if t is tuple or t is list or t is dict:
for v in data:
if isinstance(v, numpy.ndarray):
expect += v.nbytes
return expect
def _pack(data, mem, offset, limit):
if len(mem) == 0:
return data
t = type(data)
over = False
if t is tuple or t is list:
ret = []
for v in data:
if isinstance(v, numpy.ndarray):
if v.nbytes + offset > limit:
over = True
else:
v = _PackedNdarray(v, mem, offset)
offset += v.nbytes
ret.append(v)
data = t(ret)
elif t is dict:
ret = {}
for k, v in six.iteritems(data):
if isinstance(v, numpy.ndarray):
if v.nbytes + offset > limit:
over = True
else:
v = _PackedNdarray(v, mem, offset)
offset += v.nbytes
ret[k] = v
data = ret
elif t is numpy.ndarray:
if data.nbytes + offset > limit:
over = True
else:
data = _PackedNdarray(data, mem, offset)
offset += data.nbytes
if over:
expect = _measure(data)
warnings.warn(
'Shared memory size is too small.\n' +
'Please set shared_mem option for MultiprocessIterator.\n' +
'Expect shared memory size: {} bytes.\n'.format(expect) +
'Actual shared memory size: {} bytes.'.format(limit - offset),
UserWarning)
return data
def _unpack(data, mem):
if len(mem) == 0:
return data
t = type(data)
if t is tuple or t is list:
ret = []
for v in data:
if isinstance(v, _PackedNdarray):
v = v.unpack(mem)
ret.append(v)
data = t(ret)
elif t is dict:
ret = {}
for k, v in six.iteritems(data):
if isinstance(v, _PackedNdarray):
v = v.unpack(mem)
ret[k] = v
data = ret
elif t is _PackedNdarray:
data = data.unpack(mem)
return data
| 22,878
| 33.98318
| 79
|
py
|
chainer
|
chainer-master/chainer/iterators/__init__.py
|
# import classes and functions
from chainer.iterators.multiprocess_iterator import MultiprocessIterator # NOQA
from chainer.iterators.multithread_iterator import MultithreadIterator # NOQA
from chainer.iterators.serial_iterator import SerialIterator # NOQA
from chainer.iterators.dali_iterator import DaliIterator # NOQA
from chainer.iterators.order_samplers import OrderSampler # NOQA
from chainer.iterators.order_samplers import ShuffleOrderSampler # NOQA
| 466
| 45.7
| 80
|
py
|
chainer
|
chainer-master/chainer/iterators/serial_iterator.py
|
from __future__ import division
import numpy
from chainer.dataset import iterator
from chainer.iterators import _statemachine
from chainer.iterators.order_samplers import ShuffleOrderSampler
class SerialIterator(iterator.Iterator):
"""Dataset iterator that serially reads the examples.
This is a simple implementation of :class:`~chainer.dataset.Iterator`
that just visits each example in either the order of indexes or a shuffled
order.
To avoid unintentional performance degradation, the ``shuffle`` option is
set to ``True`` by default. For validation, it is better to set it to
``False`` when the underlying dataset supports fast slicing. If the
order of examples has an important meaning and the updater depends on the
original order, this option should be set to ``False``.
This iterator saves ``-1`` instead of ``None`` in snapshots since some
serializers do not support ``None``.
Args:
dataset: Dataset to iterate.
batch_size (int): Number of examples within each batch.
repeat (bool): If ``True``, it infinitely loops over the dataset.
Otherwise, it stops iteration at the end of the first epoch.
shuffle (bool): If ``True``, the order of examples is shuffled at the
beginning of each epoch. Otherwise, examples are extracted in the
order of indexes. If ``None`` and no ``order_sampler`` is given,
the behavior is the same as the case with ``shuffle=True``.
order_sampler (callable): A callable that generates the order
of the indices to sample in the next epoch when a epoch finishes.
This function should take two arguments: the current order
and the current position of the iterator.
This should return the next order. The size of the order
should remain constant.
This option cannot be used when ``shuffle`` is not ``None``.
"""
def __init__(self, dataset, batch_size,
repeat=True, shuffle=None, order_sampler=None):
self.dataset = dataset
self.batch_size = batch_size
self._repeat = repeat
self._shuffle = shuffle
if self._shuffle is not None:
if order_sampler is not None:
raise ValueError('`shuffle` is not `None` and a custom '
'`order_sampler` is set. Please set '
'`shuffle` to `None` to use the custom '
'order sampler.')
else:
if self._shuffle:
order_sampler = ShuffleOrderSampler()
else:
if order_sampler is None:
order_sampler = ShuffleOrderSampler()
self.order_sampler = order_sampler
self.reset()
def __next__(self):
self._previous_epoch_detail = self.epoch_detail
self._state, indices = _statemachine.iterator_statemachine(
self._state, self.batch_size, self.repeat, self.order_sampler,
len(self.dataset))
if indices is None:
raise StopIteration
batch = [self.dataset[index] for index in indices]
return batch
next = __next__
@property
def current_position(self):
return self._state.current_position
@property
def epoch(self):
return self._state.epoch
@property
def is_new_epoch(self):
return self._state.is_new_epoch
@property
def epoch_detail(self):
return self.epoch + self.current_position / self._epoch_size
@property
def previous_epoch_detail(self):
# use -1 instead of None internally.
if self._previous_epoch_detail < 0:
return None
return self._previous_epoch_detail
def serialize(self, serializer):
current_position = serializer('current_position',
self.current_position)
epoch = serializer('epoch', self.epoch)
is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
order = self._state.order
if order is not None:
try:
serializer('order', order)
except KeyError:
serializer('_order', order)
self._state = _statemachine.IteratorState(
current_position, epoch, is_new_epoch, order)
try:
self._previous_epoch_detail = serializer(
'previous_epoch_detail', self._previous_epoch_detail)
except KeyError:
# guess previous_epoch_detail for older version
self._previous_epoch_detail = self.epoch + \
(self.current_position - self.batch_size) / self._epoch_size
if self.epoch_detail > 0:
self._previous_epoch_detail = max(
self._previous_epoch_detail, 0.)
else:
self._previous_epoch_detail = -1.
def reset(self):
if self.order_sampler:
order = self.order_sampler(
numpy.arange(len(self.dataset)), 0)
else:
order = None
self._state = _statemachine.IteratorState(0, 0, False, order)
self._previous_epoch_detail = -1.
@property
def _epoch_size(self):
order = self._state.order
if order is None:
epoch_size = len(self.dataset)
else:
epoch_size = len(order)
return epoch_size
@property
def repeat(self):
return self._repeat
| 5,543
| 35.473684
| 78
|
py
|
chainer
|
chainer-master/chainer/iterators/order_samplers.py
|
import numpy
class OrderSampler(object):
"""Base class of all order samplers.
Every order sampler subclass has to provide a method
:meth:`__call__`.
This method is called by an iterator before a new epoch,
and it should return a new index order for the next epoch.
"""
def __call__(self, current_order, current_position):
"""Sample the next order.
Args:
current_order (numpy.ndarray): 1-D array of indices.
The length should be the same as the dataset to sample
data from.
current_position (int): The current position of an iterator.
Returns:
numpy.ndarray:
1-D array of indices. This is the order in which
examples are sampled from a dataset in the next epoch.
"""
raise NotImplementedError
class ShuffleOrderSampler(OrderSampler):
"""Sampler that generates random orders.
This is expected to be used together with Chainer's iterators.
An order sampler is called by an iterator every epoch.
The two initializations below create basically the same objects.
>>> dataset = [(1, 2), (3, 4)]
>>> it = chainer.iterators.MultiprocessIterator(dataset, 1, shuffle=True)
>>> it = chainer.iterators.MultiprocessIterator(
... dataset, 1, order_sampler=chainer.iterators.ShuffleOrderSampler())
Args:
random_state (numpy.random.RandomState): Pseudo-random number
generator.
"""
def __init__(self, random_state=None):
if random_state is None:
random_state = numpy.random.random.__self__
self._random = random_state
def __call__(self, current_order, current_position):
return self._random.permutation(len(current_order))
| 1,793
| 28.9
| 78
|
py
|
chainer
|
chainer-master/chainer/optimizers/smorms3.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class SMORMS3Hyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of Simon Funk's SMORMS3.
This is only for PEP 544 compliant static type checkers.
"""
lr = None # type: float
eps = None # type: float
_default_hyperparam = optimizer.Hyperparameter() # type: SMORMS3Hyperparameter # NOQA
_default_hyperparam.lr = 0.001
_default_hyperparam.eps = 1e-16
class SMORMS3Rule(optimizer.UpdateRule):
"""Update rule for Simon Funk's SMORMS3.
See :class:`~chainer.optimizers.SMORMS3` for the default values of the
hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
eps (float): Small value for the numerical stability.
"""
is_elementwise = True
_kernel = None
def __init__(self, parent_hyperparam=None, lr=None, eps=None):
super(SMORMS3Rule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
if eps is not None:
self.hyperparam.eps = eps
def init_state(self, param):
with chainer.using_device(param.device):
xp = param.device.xp
self.state['mem'] = xp.ones_like(param.data)
self.state['g'] = xp.zeros_like(param.data)
self.state['g2'] = xp.zeros_like(param.data)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
mem, g, g2 = self.state['mem'], self.state['g'], self.state['g2']
r = 1 / (mem + 1)
g = (1 - r) * g + r * grad
g2 = (1 - r) * g2 + r * grad * grad
x = g * g / (g2 + self.hyperparam.eps)
param.data -= grad * numpy.minimum(x, self.hyperparam.lr) \
/ (numpy.sqrt(g2) + self.hyperparam.eps)
mem = 1 + mem * (1 - x)
self.state['mem'], self.state['g'], self.state['g2'] = mem, g, g2
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
if SMORMS3Rule._kernel is None:
SMORMS3Rule._kernel = cuda.elementwise(
'T grad, T lr, T eps',
'T param, T mem, T g, T g2',
'''T r, x;
r = 1 / (mem + 1);
g = (1 - r) * g + r * grad;
g2 = (1 - r) * g2 + r * grad * grad;
x = g * g / (g2 + eps);
param -= grad * min(lr, x) / (sqrt(g2) + eps);
mem = 1 + mem * (1 - x)
''',
'smorms3')
SMORMS3Rule._kernel(
grad, self.hyperparam.lr, self.hyperparam.eps, param.data,
self.state['mem'], self.state['g'], self.state['g2'])
class SMORMS3(optimizer.GradientMethod):
"""Simon Funk's SMORMS3.
See http://sifter.org/~simon/journal/20150420.html.
Args:
lr (float): Learning rate.
eps (float): Small value for the numerical stability.
"""
def __init__(self, lr=_default_hyperparam.lr, eps=_default_hyperparam.eps):
super(SMORMS3, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.eps = eps
lr = optimizer.HyperparameterProxy('lr')
eps = optimizer.HyperparameterProxy('eps')
def create_update_rule(self):
return SMORMS3Rule(self.hyperparam)
| 3,641
| 29.605042
| 86
|
py
|
chainer
|
chainer-master/chainer/optimizers/momentum_sgd.py
|
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class MomentumSGDHyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of classical momentum SGD.
This is only for PEP 544 compliant static type checkers.
"""
lr = None # type: float
momentum = None # type: float
_default_hyperparam = optimizer.Hyperparameter() # type: MomentumSGDHyperparameter # NOQA
_default_hyperparam.lr = 0.01
_default_hyperparam.momentum = 0.9
class MomentumSGDRule(optimizer.UpdateRule):
"""Update rule for the classical momentum SGD.
See :class:`~chainer.optimizers.MomentumSGD` for the default values of the
hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
momentum (float): Exponential decay rate of the first order moment.
"""
is_elementwise = True
_kernel = None
def __init__(self, parent_hyperparam=None, lr=None, momentum=None):
super(MomentumSGDRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
if momentum is not None:
self.hyperparam.momentum = momentum
def init_state(self, param):
with chainer.using_device(param.device):
xp = param.device.xp
self.state['v'] = xp.zeros_like(param.data)
# For iDeep
if isinstance(param.data, intel64.mdarray):
self.state['v'] = intel64.ideep.array(
self.state['v'], itype=intel64.ideep.wgt_array)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
v = self.state['v']
if isinstance(v, intel64.mdarray):
v.inplace_axpby(self.hyperparam.momentum, -
self.hyperparam.lr, grad)
param.data += v
else:
v *= self.hyperparam.momentum
v -= self.hyperparam.lr * grad
param.data += v
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
if MomentumSGDRule._kernel is None:
MomentumSGDRule._kernel = cuda.elementwise(
'T grad, T lr, T momentum',
'T param, T v',
'''v = momentum * v - lr * grad;
param += v;''',
'momentum_sgd')
MomentumSGDRule._kernel(
grad, self.hyperparam.lr, self.hyperparam.momentum, param.data,
self.state['v'])
class MomentumSGD(optimizer.GradientMethod):
"""Momentum SGD optimizer.
Args:
lr (float): Learning rate.
momentum (float): Exponential decay rate of the first order moment.
"""
def __init__(self, lr=_default_hyperparam.lr,
momentum=_default_hyperparam.momentum):
super(MomentumSGD, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.momentum = momentum
lr = optimizer.HyperparameterProxy('lr')
momentum = optimizer.HyperparameterProxy('momentum')
def create_update_rule(self):
return MomentumSGDRule(self.hyperparam)
| 3,423
| 29.571429
| 90
|
py
|
chainer
|
chainer-master/chainer/optimizers/sgd.py
|
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class SGDHyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of vanilla stochastic gradient descent.
This is only for PEP 544 compliant static type checkers.
"""
lr = None # type: float
_default_hyperparam = optimizer.Hyperparameter() # type: SGDHyperparameter # NOQA
_default_hyperparam.lr = 0.01
class SGDRule(optimizer.UpdateRule):
"""Update rule of vanilla stochastic gradient descent.
See :class:`~chainer.optimizers.SGD` for the default values of the
hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
"""
is_elementwise = True
_kernel = None
def __init__(self, parent_hyperparam=None, lr=None):
super(SGDRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
if isinstance(param.data, intel64.mdarray):
param.data.inplace_axpby(1.0, -self.hyperparam.lr, grad)
else:
param.data -= self.hyperparam.lr * grad
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
if SGDRule._kernel is None:
SGDRule._kernel = cuda.elementwise(
'T grad, T lr', 'T param',
'param -= lr * grad', 'sgd')
SGDRule._kernel(grad, self.hyperparam.lr, param.data)
class SGD(optimizer.GradientMethod):
"""Vanilla Stochastic Gradient Descent.
Args:
lr (float): Learning rate.
"""
def __init__(self, lr=_default_hyperparam.lr):
super(SGD, self).__init__()
self.hyperparam.lr = lr
lr = optimizer.HyperparameterProxy('lr')
def create_update_rule(self):
return SGDRule(self.hyperparam)
| 2,194
| 25.768293
| 84
|
py
|
chainer
|
chainer-master/chainer/optimizers/corrected_momentum_sgd.py
|
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class CorrectedMomentumSGDHyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of corrected momentum SGD.
This is only for PEP 544 compliant static type checkers.
"""
lr = None # type: float
momentum = None # type: float
_default_hyperparam = optimizer.Hyperparameter() # type: CorrectedMomentumSGDHyperparameter # NOQA
_default_hyperparam.lr = 0.01
_default_hyperparam.momentum = 0.9
class CorrectedMomentumSGDRule(optimizer.UpdateRule):
"""Update rule for the corrected momentum SGD.
See :class:`~chainer.optimizers.CorrectedMomentumSGD` for the default
values of the hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
momentum (float): Exponential decay rate of the first order moment.
"""
is_elementwise = True
def __init__(self, parent_hyperparam=None, lr=None, momentum=None):
super(CorrectedMomentumSGDRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
if momentum is not None:
self.hyperparam.momentum = momentum
def init_state(self, param):
with chainer.using_device(param.device):
self.state['v'] = param.device.xp.zeros_like(param.data)
# For iDeep
if isinstance(param.data, intel64.mdarray):
self.state['v'] = intel64.ideep.array(
self.state['v'], itype=intel64.ideep.wgt_array)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
v = self.state['v']
if isinstance(v, intel64.mdarray):
v.inplace_axpby(self.hyperparam.momentum,
-1, grad)
param.data += self.hyperparam.lr * v
else:
v *= self.hyperparam.momentum
v -= grad
param.data += self.hyperparam.lr * v
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
cuda.elementwise(
'T grad, T lr, T momentum',
'T param, T v',
'''v = momentum * v - grad;
param += lr * v;''',
'momentum_sgd')(
grad, self.hyperparam.lr, self.hyperparam.momentum,
param.data, self.state['v'])
class CorrectedMomentumSGD(optimizer.GradientMethod):
"""Momentum SGD optimizer.
This implements momentum correction discussed in the third section of
`Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour
<https://arxiv.org/abs/1706.02677>`_.
:class:`~chainer.optimizers.MomentumSGD` implements the equation (10) of
the paper. This optimizer implements the equation (9).
To get better understanding between the two methods,
we show the equivalence between the equation (9) and modification of
the equation (10) that takes momentum correction into account.
First, we set :math:`v_{t} = \\eta_{t} u_t`.
We substitute this relation to the equation (10).
.. math::
v_{t+1} &= m\\frac{\\eta_{t+1}}{\\eta_{t}}v_t + \\eta_{t+1}g_t \\\\
&= m\\frac{\\eta_{t+1}}{\\eta_{t}}\\eta_{t}u_t +
\\eta_{t+1}g_t \\\\
&= \\eta_{t+1}(m u_t + g_t) \\\\
From this result, we derive :math:`u_{t+1} = m u_t + g_t`, which is how
update tensors are calculated by
:class:`~chainer.optimizers.CorrectedMomentumSGD`. Thus, the equivalence
is shown.
Args:
lr (float): Learning rate.
momentum (float): Exponential decay rate of the first order moment.
"""
def __init__(self, lr=_default_hyperparam.lr,
momentum=_default_hyperparam.momentum):
super(CorrectedMomentumSGD, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.momentum = momentum
lr = optimizer.HyperparameterProxy('lr')
momentum = optimizer.HyperparameterProxy('momentum')
def create_update_rule(self):
return CorrectedMomentumSGDRule(self.hyperparam)
| 4,407
| 32.393939
| 99
|
py
|
chainer
|
chainer-master/chainer/optimizers/msvag.py
|
from __future__ import division
import numpy
import chainer
from chainer.backends import cuda
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class MSVAGHyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of M-SVAG.
This is only for PEP 544 compliant static type checkers.
"""
lr = None # type: float
beta = None # type: float
eta = None # type: float
weight_decay_rate = None # type: float
_default_hyperparam = optimizer.Hyperparameter() # type: MSVAGHyperparameter # NOQA
_default_hyperparam.lr = 0.1
_default_hyperparam.beta = 0.9
_default_hyperparam.eta = 1.0
_default_hyperparam.weight_decay_rate = 0
class MSVAGRule(optimizer.UpdateRule):
"""Update rule of the M-SVAG optimization algorithm.
See: `Dissecting Adam: The Sign, Magnitude and Variance of Stochastic
Gradients <https://arxiv.org/abs/1705.07774>`_
Modified for proper weight decay.
See: `Fixing Weight Decay Regularization in Adam
<https://openreview.net/forum?id=rk6qdGgCZ>`_
See :class:`~chainer.optimizers.MSVAG` for the default values
of the hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
beta (float): Exponential decay rate of the first and second order
moment.
eta (float): Schedule multiplier, can be used for warm restarts.
weight_decay_rate (float): Weight decay rate.
"""
is_elementwise = True
def __init__(self, parent_hyperparam=None,
lr=None, beta=None,
eta=None, weight_decay_rate=None):
super(MSVAGRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
if beta is not None:
self.hyperparam.beta = beta
if eta is not None:
self.hyperparam.eta = eta
if weight_decay_rate is not None:
self.hyperparam.weight_decay_rate = weight_decay_rate
self.beta_power = self.hyperparam.beta
def init_state(self, param):
with chainer.using_device(param.device):
xp = param.device.xp
self.state['m'] = xp.zeros_like(param.data)
self.state['v'] = xp.zeros_like(param.data)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
m, v = self.state['m'], self.state['v']
rho = (((1.0 - hp.beta) ** 2) * (1.0 - self.beta_power ** 2) /
(((1.0 - self.beta_power) ** 2) * (1.0 - hp.beta ** 2)))
rho = min(rho, 0.9999)
m += (1 - hp.beta) * (grad - m)
v += (1 - hp.beta) * (grad * grad - v)
mt = m / (1 - self.beta_power)
vt = v / (1 - self.beta_power)
mt2 = mt ** 2
s = (vt - mt2) / (1 - rho)
factor = numpy.clip(mt2 / (mt2 + rho * s), 0, 1)
if isinstance(factor, numpy.ndarray):
factor[numpy.isnan(factor)] = 0
else:
if numpy.isnan(factor):
factor = 0
param.data -= hp.eta * (hp.lr * mt * factor +
hp.weight_decay_rate * param.data)
self.beta_power *= hp.beta
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
rho = (((1.0 - hp.beta) ** 2) * (1.0 - self.beta_power ** 2) /
(((1.0 - self.beta_power) ** 2) * (1.0 - hp.beta ** 2)))
rho = min(rho, 0.9999)
cuda.elementwise(
'T grad, T lr, T one_minus_beta, T eta, \
T weight_decay_rate, T beta_power, T rho',
'T param, T m, T v',
'''m += one_minus_beta * (grad - m);
v += one_minus_beta * (grad * grad - v);
T mt = m / (1.0 - beta_power);
T vt = v / (1.0 - beta_power);
T mt2 = mt*mt;
T s = (vt - mt2) / (1.0 - rho);
T factor;
if (m == 0 && v == 0)
factor = 0.0;
else
factor = min(1.0, max(0.0, mt2 / (mt2 + rho * s)));
param -= eta * (lr * mt * factor +
weight_decay_rate * param);''',
'msvag')(grad, hp.lr, 1 - hp.beta,
hp.eta, hp.weight_decay_rate,
self.beta_power, rho,
param.data, self.state['m'], self.state['v'])
self.beta_power *= hp.beta
class MSVAG(optimizer.GradientMethod):
"""M-SVAG optimizer.
See: `Dissecting Adam: The Sign, Magnitude and Variance of Stochastic
Gradients <https://arxiv.org/abs/1705.07774>`_
Modified for proper weight decay (also called AdamW).
AdamW introduces the additional parameters ``eta``
and ``weight_decay_rate``, which can be used to properly scale the
learning rate, and decouple the weight decay rate from ``alpha``,
as shown in the below paper.
See: `Fixing Weight Decay Regularization in Adam
<https://openreview.net/forum?id=rk6qdGgCZ>`_
Args:
lr (float): Learning rate.
beta (float): Exponential decay rate of the first and second order
moment.
eta (float): Schedule multiplier, can be used for warm restarts.
weight_decay_rate (float): Weight decay rate.
"""
def __init__(self,
lr=_default_hyperparam.lr,
beta=_default_hyperparam.beta,
eta=_default_hyperparam.eta,
weight_decay_rate=_default_hyperparam.weight_decay_rate):
super(MSVAG, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.beta = beta
self.hyperparam.eta = eta
self.hyperparam.weight_decay_rate = weight_decay_rate
lr = optimizer.HyperparameterProxy('lr')
beta = optimizer.HyperparameterProxy('beta')
eta = optimizer.HyperparameterProxy('eta')
weight_decay_rate = optimizer.HyperparameterProxy('weight_decay_rate')
def create_update_rule(self):
return MSVAGRule(self.hyperparam)
| 6,356
| 31.433673
| 84
|
py
|
chainer
|
chainer-master/chainer/optimizers/ada_grad.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class AdaGradHyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of AdaGrad.
This is only for PEP 544 compliant static type checkers.
"""
lr = None # type: float
eps = None # type: float
_default_hyperparam = optimizer.Hyperparameter() # type: AdaGradHyperparameter # NOQA
_default_hyperparam.lr = 0.001
_default_hyperparam.eps = 1e-8
class AdaGradRule(optimizer.UpdateRule):
"""Update rule of AdaGrad.
See :class:`~chainer.optimizers.AdaGrad` for the default values of the
hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
eps (float): Small value for the numerical stability.
"""
is_elementwise = True
_kernel = None
def __init__(self, parent_hyperparam=None, lr=None, eps=None):
super(AdaGradRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
if eps is not None:
self.hyperparam.eps = eps
def init_state(self, param):
with chainer.using_device(param.device):
self.state['h'] = param.device.xp.zeros_like(param.data)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
lr = self.hyperparam.lr
eps = self.hyperparam.eps
h = self.state['h']
h += grad * grad
param.data -= lr * grad / (numpy.sqrt(h) + eps)
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
if AdaGradRule._kernel is None:
AdaGradRule._kernel = cuda.elementwise(
'T grad, T lr, T eps',
'T param, T h',
'''h += grad * grad;
param -= lr * grad / (sqrt(h) + eps);''',
'adagrad')
AdaGradRule._kernel(grad, self.hyperparam.lr, self.hyperparam.eps,
param.data, self.state['h'])
class AdaGrad(optimizer.GradientMethod):
"""AdaGrad optimizer.
See: http://jmlr.org/papers/v12/duchi11a.html
Args:
lr (float): Learning rate.
eps (float): Small value for the numerical stability.
"""
def __init__(self, lr=_default_hyperparam.lr, eps=_default_hyperparam.eps):
super(AdaGrad, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.eps = eps
lr = optimizer.HyperparameterProxy('lr')
eps = optimizer.HyperparameterProxy('eps')
def create_update_rule(self):
return AdaGradRule(self.hyperparam)
| 2,899
| 26.619048
| 86
|
py
|
chainer
|
chainer-master/chainer/optimizers/adam.py
|
from __future__ import division
import math
import warnings
import numpy
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class AdamHyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of Adam.
This is only for PEP 544 compliant static type checkers.
"""
alpha = None # type: float
beta1 = None # type: float
beta2 = None # type: float
eps = None # type: float
eta = None # type: float
weight_decay_rate = None # type: float
amsgrad = None # type: bool
adabound = None # type: bool
final_lr = None # type: float
gamma = None # type: float
_default_hyperparam = optimizer.Hyperparameter() # type: AdamHyperparameter # NOQA
_default_hyperparam.alpha = 0.001
_default_hyperparam.beta1 = 0.9
_default_hyperparam.beta2 = 0.999
_default_hyperparam.eps = 1e-8
_default_hyperparam.eta = 1.0
_default_hyperparam.weight_decay_rate = 0
_default_hyperparam.amsgrad = False
_default_hyperparam.adabound = False
_default_hyperparam.final_lr = 0.1
_default_hyperparam.gamma = 1e-3
def _learning_rate(hp, t):
if t == 0:
raise RuntimeError(
'Can\'t determine the learning rate of Adam optimizer '
'because the update steps have not been started.')
fix1 = 1. - math.pow(hp.beta1, t)
fix2 = 1. - math.pow(hp.beta2, t)
return hp.alpha * math.sqrt(fix2) / fix1
def _get_intermediate_dtype(dtype):
# Returns the dtype for intermediate calculation.
# For float16 input, float32 is used.
# Otherwise the same dtype as the parameter is used.
if dtype == numpy.float16:
return numpy.float32
return dtype
def _inplace_axpby(x, a, b, y):
# in-place axpby: x = a * x + b * y
if isinstance(x, intel64.mdarray):
x.inplace_axpby(a, b, y)
else:
if a == 1:
x += b * y
else:
x[...] = a * x + b * y
class AdamRule(optimizer.UpdateRule):
"""Update rule of Adam optimization algorithm.
See: `Adam: A Method for Stochastic Optimization
<https://arxiv.org/abs/1412.6980v8>`_
Modified for proper weight decay.
See: `Fixing Weight Decay Regularization in Adam
<https://openreview.net/forum?id=rk6qdGgCZ>`_
With option to use AMSGrad variant of Adam.
See: `On the Convergence of Adam and Beyond
<https://openreview.net/forum?id=ryQu7f-RZ>`_
With option to use AdaBound variant of Adam.
See: `Adaptive Gradient Methods with Dynamic Bound of Learning Rate
<https://openreview.net/forum?id=Bkg3g2R9FX>`
See :class:`~chainer.optimizers.Adam` for the default values
of the hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
alpha (float): Coefficient of learning rate.
beta1 (float): Exponential decay rate of the first order moment.
beta2 (float): Exponential decay rate of the second order moment.
eps (float): Small value for the numerical stability.
eta (float): Schedule multiplier, can be used for warm restarts.
weight_decay_rate (float): Weight decay rate.
amsgrad (bool): Whether to use the AMSGrad variant of Adam.
adabound (bool): Whether to use the AdaBound variant of Adam.
final_lr (float): Final (SGD) learning rate in AdaBound.
gamma (float): Convergence speed of the bound functions in AdaBound.
"""
is_elementwise = True
_kernel = None
_amsgrad_kernel = None
_adabound_kernel = None
_amsbound_kernel = None
# Only used in `update_core_gpu`.
# A dummy ndarray to help ElementwiseKernel deduce generic type T as
# `dtype`.
# It cannot be deduced only by scalar arguments.
_dummy = None
def __init__(self, parent_hyperparam=None,
alpha=None, beta1=None, beta2=None, eps=None,
eta=None, weight_decay_rate=None, amsgrad=None,
adabound=None, final_lr=None, gamma=None):
super(AdamRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if alpha is not None:
self.hyperparam.alpha = alpha
if beta1 is not None:
self.hyperparam.beta1 = beta1
if beta2 is not None:
self.hyperparam.beta2 = beta2
if eps is not None:
self.hyperparam.eps = eps
if eta is not None:
self.hyperparam.eta = eta
if weight_decay_rate is not None:
self.hyperparam.weight_decay_rate = weight_decay_rate
if amsgrad is not None:
self.hyperparam.amsgrad = amsgrad
if adabound is not None:
self.hyperparam.adabound = adabound
if final_lr is not None:
self.hyperparam.final_lr = final_lr
if gamma is not None:
self.hyperparam.gamma = gamma
if self.hyperparam.adabound:
self.initial_alpha = self.hyperparam.alpha
def init_state(self, param):
with chainer.using_device(param.device):
xp = param.device.xp
self.state['m'] = xp.zeros_like(param.data)
self.state['v'] = xp.zeros_like(param.data)
if self.hyperparam.amsgrad:
self.state['vhat'] = xp.zeros_like(param.data)
# For iDeep
if isinstance(param.data, intel64.mdarray):
self.state['m'] = intel64.ideep.array(
self.state['m'], itype=intel64.ideep.wgt_array)
self.state['v'] = intel64.ideep.array(
self.state['v'], itype=intel64.ideep.wgt_array)
if self.hyperparam.amsgrad:
self.state['vhat'] = intel64.ideep.array(
self.state['vhat'], itype=intel64.ideep.wgt_array)
def _check_eps(self, interm_dtype):
# Checks that the eps does not underflow.
hp = self.hyperparam
eps = interm_dtype(hp.eps)
if hp.eps != 0 and eps == 0:
raise ValueError(
'eps of Adam optimizer is too small for {} ({})'.format(
interm_dtype.name, hp.eps))
# Note that the converted `eps` (numpy scalar) is discarded here and
# the original `hp.eps` is used in calculation, because Python
# scalars are faster in cupy elementwise kernels.
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
dtype = _get_intermediate_dtype(param.dtype.type)
self._check_eps(dtype)
grad = grad.astype(dtype, copy=False)
m, v = self.state['m'], self.state['v']
# m += (1 - beta1) * (grad - m)
_inplace_axpby(m, 1.0, 1.0 - hp.beta1, grad - m)
# v += (1 - beta2) * (grad * grad - v)
_inplace_axpby(v, 1.0, 1.0 - hp.beta2, grad*grad - v)
if hp.amsgrad:
vhat = self.state['vhat']
# For iDeep
if isinstance(vhat, intel64.mdarray):
vhat[...] = numpy.maximum(vhat, v)
else:
numpy.maximum(vhat, v, out=vhat)
else:
vhat = v
vhat = vhat.astype(dtype, copy=False)
step = self.alpha_t / (numpy.sqrt(vhat) + hp.eps)
if hp.adabound:
lower, upper = self.bounds
step = numpy.clip(step, lower, upper)
# param -=
# eta * (step * m - weight_decay_rate * param)
_inplace_axpby(
param.data, 1.0 - hp.eta * hp.weight_decay_rate, -hp.eta, step * m)
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
dtype = _get_intermediate_dtype(param.dtype.type)
self._check_eps(dtype)
if self._dummy is None:
self._dummy = cuda.cupy.empty((0,), dtype=dtype)
if hp.adabound:
lower, upper = self.bounds
if hp.amsgrad and hp.adabound:
if AdamRule._amsbound_kernel is None:
AdamRule._amsbound_kernel = cuda.elementwise(
'P grad, T alpha_t, T one_minus_beta1, T one_minus_beta2, '
'T lower, T upper, '
'T eps, T eta, T weight_decay_rate, raw T dummy',
'P param, P m, P v, P vhat',
'''T grad_ = static_cast<T>(grad);
T m_ = static_cast<T>(m);
T v_ = static_cast<T>(v);
T vhat_ = static_cast<T>(vhat);
m_ += one_minus_beta1 * (grad_ - m_);
v_ += one_minus_beta2 * (grad_ * grad_ - v_);
vhat_ = max(vhat_, v_);
vhat = static_cast<T>(vhat_);
m = static_cast<P>(m_);
v = static_cast<P>(v_);
param -= eta *
(max(min(alpha_t / (sqrt(vhat_) + eps), upper),
lower) * m_ + weight_decay_rate * param);''',
'amsbound')
AdamRule._amsbound_kernel(
grad, self.alpha_t, 1 - hp.beta1,
1 - hp.beta2, lower, upper, hp.eps,
hp.eta, hp.weight_decay_rate, self._dummy,
param.data, self.state['m'], self.state['v'],
self.state['vhat'])
elif hp.adabound:
if AdamRule._adabound_kernel is None:
AdamRule._adabound_kernel = cuda.elementwise(
'P grad, T alpha_t, T one_minus_beta1, T one_minus_beta2, '
'T lower, T upper, '
'T eps, T eta, T weight_decay_rate, raw T dummy',
'P param, P m, P v',
'''T grad_ = static_cast<T>(grad);
T m_ = static_cast<T>(m);
T v_ = static_cast<T>(v);
m_ += one_minus_beta1 * (grad_ - m_);
v_ += one_minus_beta2 * (grad_ * grad_ - v_);
m = static_cast<P>(m_);
v = static_cast<P>(v_);
param -= eta *
(max(min(alpha_t / (sqrt(v_) + eps), upper),
lower) * m_ + weight_decay_rate * param);''',
'adabound')
AdamRule._adabound_kernel(
grad, self.alpha_t, 1 - hp.beta1,
1 - hp.beta2, lower, upper, hp.eps,
hp.eta, hp.weight_decay_rate, self._dummy,
param.data, self.state['m'], self.state['v'])
elif hp.amsgrad:
if AdamRule._amsgrad_kernel is None:
AdamRule._amsgrad_kernel = cuda.elementwise(
'P grad, T alpha_t, T one_minus_beta1, T one_minus_beta2, '
'T eps, T eta, T weight_decay_rate, raw T dummy',
'P param, P m, P v, P vhat',
'''T grad_ = static_cast<T>(grad);
T m_ = static_cast<T>(m);
T v_ = static_cast<T>(v);
T vhat_ = static_cast<T>(vhat);
m_ += one_minus_beta1 * (grad_ - m_);
v_ += one_minus_beta2 * (grad_ * grad_ - v_);
vhat_ = max(vhat_, v_);
vhat = static_cast<T>(vhat_);
m = static_cast<P>(m_);
v = static_cast<P>(v_);
param -= eta * (alpha_t * m_ / (sqrt(vhat_) + eps) +
weight_decay_rate * param);''',
'adam')
AdamRule._amsgrad_kernel(
grad, self.alpha_t, 1 - hp.beta1,
1 - hp.beta2, hp.eps,
hp.eta, hp.weight_decay_rate, self._dummy,
param.data, self.state['m'], self.state['v'],
self.state['vhat'])
else:
if AdamRule._kernel is None:
AdamRule._kernel = cuda.elementwise(
'P grad, T alpha_t, T one_minus_beta1, T one_minus_beta2, '
'T eps, T eta, T weight_decay_rate, raw T dummy',
'P param, P m, P v',
'''T grad_ = static_cast<T>(grad);
T m_ = static_cast<T>(m);
T v_ = static_cast<T>(v);
m_ += one_minus_beta1 * (grad_ - m_);
v_ += one_minus_beta2 * (grad_ * grad_ - v_);
m = static_cast<P>(m_);
v = static_cast<P>(v_);
param -= eta * (alpha_t * m_ / (sqrt(v_) + eps) +
weight_decay_rate * param);''',
'adam')
AdamRule._kernel(
grad, self.alpha_t, 1 - hp.beta1,
1 - hp.beta2, hp.eps,
hp.eta, hp.weight_decay_rate, self._dummy,
param.data, self.state['m'], self.state['v'])
@property
def alpha_t(self):
return _learning_rate(self.hyperparam, self.t)
@property
def lr(self):
warnings.warn(
'AdamRule.lr has been renamed to AdamRule.alpha_t. '
'Use of AdamRule.lr is deprecated in Chainer v6.',
DeprecationWarning)
return self.alpha_t
@property
def bounds(self):
if self.t == 0:
raise RuntimeError(
'Can\'t determine the bounds of AdaBound optimizer '
'because the update steps have not been started.')
hp = self.hyperparam
# Workaround to reflect changing `alpha` in `final_lr`.
# (by some of `chainer.training.extensions`)
final_lr = hp.final_lr * hp.alpha / self.initial_alpha
lower = final_lr * (1.0 - 1.0 / (hp.gamma * self.t + 1))
upper = final_lr * (1.0 + 1.0 / (hp.gamma * self.t))
return lower, upper
class Adam(optimizer.GradientMethod):
"""Adam optimizer.
See: `Adam: A Method for Stochastic Optimization
<https://arxiv.org/abs/1412.6980v8>`_
Modified for proper weight decay (also called
:class:`~chainer.optimizers.AdamW`).
AdamW introduces the additional parameters ``eta``
and ``weight_decay_rate``, which can be used to properly scale the
learning rate, and decouple the weight decay rate from ``alpha``,
as shown in the below paper.
Note that with the default values ``eta = 1`` and
``weight_decay_rate = 0``, this implementation is identical to
the standard Adam method.
See: `Fixing Weight Decay Regularization in Adam
<https://openreview.net/forum?id=rk6qdGgCZ>`_
A flag ``amsgrad`` to use the :class:`~chainer.optimizers.AMSGrad`
variant of Adam from the paper:
`On the Convergence of Adam and Beyond
<https://openreview.net/forum?id=ryQu7f-RZ>`_
A flag ``adabound`` to use the :class:`~chainer.optimizers.AdaBound`
variant of Adam from the paper:
`Adaptive Gradient Methods with Dynamic Bound of Learning Rate
<https://openreview.net/forum?id=Bkg3g2R9FX>`_
If both ``amsgrad`` and ``adabound`` are ``True``, the optimizer is
equivalent to :class:`~chainer.optimizers.AMSBound` proposed in the
AdaBound paper.
Args:
alpha (float): Coefficient of learning rate.
beta1 (float): Exponential decay rate of the first order moment.
beta2 (float): Exponential decay rate of the second order moment.
eps (float): Small value for the numerical stability.
eta (float): Schedule multiplier, can be used for warm restarts.
weight_decay_rate (float): Weight decay rate.
amsgrad (bool): Whether to use AMSGrad variant of Adam.
adabound (bool): Whether to use the AdaBound variant of Adam.
final_lr (float): Final (SGD) learning rate in AdaBound.
gamma (float): Convergence speed of the bound functions in AdaBound.
"""
def __init__(self,
alpha=_default_hyperparam.alpha,
beta1=_default_hyperparam.beta1,
beta2=_default_hyperparam.beta2,
eps=_default_hyperparam.eps,
eta=_default_hyperparam.eta,
weight_decay_rate=_default_hyperparam.weight_decay_rate,
amsgrad=_default_hyperparam.amsgrad,
adabound=_default_hyperparam.adabound,
final_lr=_default_hyperparam.final_lr,
gamma=_default_hyperparam.gamma):
super(Adam, self).__init__()
self.hyperparam.alpha = alpha
self.hyperparam.beta1 = beta1
self.hyperparam.beta2 = beta2
self.hyperparam.eps = eps
self.hyperparam.eta = eta
self.hyperparam.weight_decay_rate = weight_decay_rate
self.hyperparam.amsgrad = amsgrad
self.hyperparam.adabound = adabound
self.hyperparam.final_lr = final_lr
self.hyperparam.gamma = gamma
alpha = optimizer.HyperparameterProxy('alpha')
beta1 = optimizer.HyperparameterProxy('beta1')
beta2 = optimizer.HyperparameterProxy('beta2')
eps = optimizer.HyperparameterProxy('eps')
eta = optimizer.HyperparameterProxy('eta')
weight_decay_rate = optimizer.HyperparameterProxy('weight_decay_rate')
amsgrad = optimizer.HyperparameterProxy('amsgrad')
adabound = optimizer.HyperparameterProxy('adabound')
final_lr = optimizer.HyperparameterProxy('final_lr')
gamma = optimizer.HyperparameterProxy('gamma')
def create_update_rule(self):
return AdamRule(self.hyperparam)
@property
def alpha_t(self):
return _learning_rate(self.hyperparam, self.t)
@property
def lr(self):
warnings.warn(
'Adam.lr has been renamed to AdamRule.alpha_t. '
'Use of Adam.lr is deprecated in Chainer v6.',
DeprecationWarning)
return self.alpha_t
class AdamW(Adam):
"""AdamW optimizer.
This class is a special case of :class:`~chainer.optimizers.Adam`.
See: `Fixing Weight Decay Regularization in Adam
<https://openreview.net/forum?id=rk6qdGgCZ>`_
Args:
alpha (float): Coefficient of learning rate.
beta1 (float): Exponential decay rate of the first order moment.
beta2 (float): Exponential decay rate of the second order moment.
eps (float): Small value for the numerical stability.
eta (float): Schedule multiplier, can be used for warm restarts.
The default value is 1.0.
weight_decay_rate (float): Weight decay rate.
The default value is 0.
"""
def __init__(self,
alpha=_default_hyperparam.alpha,
beta1=_default_hyperparam.beta1,
beta2=_default_hyperparam.beta2,
eps=_default_hyperparam.eps,
eta=_default_hyperparam.eta,
weight_decay_rate=_default_hyperparam.weight_decay_rate):
super(AdamW, self).__init__(
alpha=alpha, beta1=beta1, beta2=beta2, eps=eps, eta=eta,
weight_decay_rate=weight_decay_rate)
class AMSGrad(Adam):
"""AMSGrad optimizer.
This class is a special case of :class:`~chainer.optimizers.Adam`.
See: `On the Convergence of Adam and Beyond
<https://openreview.net/forum?id=ryQu7f-RZ>`_
Args:
alpha (float): Coefficient of learning rate.
beta1 (float): Exponential decay rate of the first order moment.
beta2 (float): Exponential decay rate of the second order moment.
eps (float): Small value for the numerical stability.
eta (float): Schedule multiplier, can be used for warm restarts.
"""
def __init__(self,
alpha=_default_hyperparam.alpha,
beta1=_default_hyperparam.beta1,
beta2=_default_hyperparam.beta2,
eps=_default_hyperparam.eps,
eta=_default_hyperparam.eta):
super(AMSGrad, self).__init__(
alpha=alpha, beta1=beta1, beta2=beta2, eps=eps, eta=eta,
amsgrad=True)
class AdaBound(Adam):
"""AdaBound optimizer.
This class is a special case of :class:`~chainer.optimizers.Adam`.
See: `Adaptive Gradient Methods with Dynamic Bound of Learning Rate
<https://openreview.net/forum?id=Bkg3g2R9FX>`_
Args:
alpha (float): Coefficient of learning rate.
beta1 (float): Exponential decay rate of the first order moment.
beta2 (float): Exponential decay rate of the second order moment.
final_lr (float): Final (SGD) learning rate in AdaBound.
gamma (float): Convergence speed of the bound functions in AdaBound.
eps (float): Small value for the numerical stability.
eta (float): Schedule multiplier, can be used for warm restarts.
"""
def __init__(self,
alpha=_default_hyperparam.alpha,
beta1=_default_hyperparam.beta1,
beta2=_default_hyperparam.beta2,
final_lr=_default_hyperparam.final_lr,
gamma=_default_hyperparam.gamma,
eps=_default_hyperparam.eps,
eta=_default_hyperparam.eta):
super(AdaBound, self).__init__(
alpha=alpha, beta1=beta1, beta2=beta2, eps=eps, eta=eta,
amsgrad=False, adabound=True, final_lr=final_lr, gamma=gamma)
class AMSBound(Adam):
"""AMSBound optimizer.
This class is a special case of :class:`~chainer.optimizers.Adam`.
See: `Adaptive Gradient Methods with Dynamic Bound of Learning Rate
<https://openreview.net/forum?id=Bkg3g2R9FX>`_
Args:
alpha (float): Coefficient of learning rate.
beta1 (float): Exponential decay rate of the first order moment.
beta2 (float): Exponential decay rate of the second order moment.
final_lr (float): Final (SGD) learning rate in AdaBound.
gamma (float): Convergence speed of the bound functions in AdaBound.
eps (float): Small value for the numerical stability.
eta (float): Schedule multiplier, can be used for warm restarts.
"""
def __init__(self,
alpha=_default_hyperparam.alpha,
beta1=_default_hyperparam.beta1,
beta2=_default_hyperparam.beta2,
final_lr=_default_hyperparam.final_lr,
gamma=_default_hyperparam.gamma,
eps=_default_hyperparam.eps,
eta=_default_hyperparam.eta):
super(AMSBound, self).__init__(
alpha=alpha, beta1=beta1, beta2=beta2, eps=eps, eta=eta,
amsgrad=True, adabound=True, final_lr=final_lr, gamma=gamma)
| 22,884
| 38.321306
| 83
|
py
|
chainer
|
chainer-master/chainer/optimizers/__init__.py
|
# import classes and functions
from chainer.optimizers.ada_delta import AdaDelta # NOQA
from chainer.optimizers.ada_grad import AdaGrad # NOQA
from chainer.optimizers.adam import Adam # NOQA
from chainer.optimizers.adam import AdamW # NOQA
from chainer.optimizers.adam import AMSGrad # NOQA
from chainer.optimizers.adam import AdaBound # NOQA
from chainer.optimizers.adam import AMSBound # NOQA
from chainer.optimizers.corrected_momentum_sgd import CorrectedMomentumSGD # NOQA
from chainer.optimizers.momentum_sgd import MomentumSGD # NOQA
from chainer.optimizers.msvag import MSVAG # NOQA
from chainer.optimizers.nesterov_ag import NesterovAG # NOQA
from chainer.optimizers.rmsprop import RMSprop # NOQA
from chainer.optimizers.rmsprop_graves import RMSpropGraves # NOQA
from chainer.optimizers.sgd import SGD # NOQA
from chainer.optimizers.smorms3 import SMORMS3 # NOQA
| 887
| 51.235294
| 82
|
py
|
chainer
|
chainer-master/chainer/optimizers/ada_delta.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class AdaDeltaHyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of Zeiler's ADADELTA.
This is only for PEP 544 compliant static type checkers.
"""
rho = None # type: float
eps = None # type: float
_default_hyperparam = optimizer.Hyperparameter() # type: AdaDeltaHyperparameter # NOQA
_default_hyperparam.rho = 0.95
_default_hyperparam.eps = 1e-6
class AdaDeltaRule(optimizer.UpdateRule):
"""Update rule of Zeiler's ADADELTA.
See :class:`~chainer.optimizers.AdaDelta` for the default values of the
hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
rho (float): Exponential decay rate of the first and second order
moments.
eps (float): Small value for the numerical stability.
"""
is_elementwise = True
_kernel = None
def __init__(self, parent_hyperparam=None, rho=None, eps=None):
super(AdaDeltaRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if rho is not None:
self.hyperparam.rho = rho
if eps is not None:
self.hyperparam.eps = eps
def init_state(self, param):
with chainer.using_device(param.device):
xp = param.device.xp
self.state['msg'] = xp.zeros_like(param.data)
self.state['msdx'] = xp.zeros_like(param.data)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
msg, msdx = self.state['msg'], self.state['msdx']
rho = self.hyperparam.rho
eps = self.hyperparam.eps
msg *= rho
msg += (1 - rho) * grad * grad
dx = numpy.sqrt((msdx + eps) / (msg + eps)) * grad
msdx *= rho
msdx += (1 - rho) * dx * dx
param.data -= dx
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
if AdaDeltaRule._kernel is None:
AdaDeltaRule._kernel = cuda.elementwise(
'T grad, T one_minus_rho, T eps',
'T param, T msg, T msdx',
'''msg = msg + one_minus_rho * (grad * grad - msg);
T dx = sqrt((msdx + eps) / (msg + eps)) * grad;
msdx += one_minus_rho * (dx * dx - msdx);
param -= dx;''',
'adadelta')
AdaDeltaRule._kernel(
grad, 1 - self.hyperparam.rho, self.hyperparam.eps, param.data,
self.state['msg'], self.state['msdx'])
class AdaDelta(optimizer.GradientMethod):
"""Zeiler's ADADELTA.
See: http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf
Args:
rho (float): Exponential decay rate of the first and second order
moments.
eps (float): Small value for the numerical stability.
"""
def __init__(self, rho=_default_hyperparam.rho,
eps=_default_hyperparam.eps):
super(AdaDelta, self).__init__()
self.hyperparam.rho = rho
self.hyperparam.eps = eps
rho = optimizer.HyperparameterProxy('rho')
eps = optimizer.HyperparameterProxy('eps')
def create_update_rule(self):
return AdaDeltaRule(self.hyperparam)
| 3,514
| 29.301724
| 87
|
py
|
chainer
|
chainer-master/chainer/optimizers/rmsprop_graves.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class RMSpropGravesHyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of Alex Graves's RMSprop.
This is only for PEP 544 compliant static type checkers.
"""
lr = None # type: float
alpha = None # type: float
momentum = None # type: float
eps = None # type: float
_default_hyperparam = optimizer.Hyperparameter() # type: RMSpropGravesHyperparameter # NOQA
_default_hyperparam.lr = 1e-4
_default_hyperparam.alpha = 0.95
_default_hyperparam.momentum = 0.9
_default_hyperparam.eps = 1e-4
class RMSpropGravesRule(optimizer.UpdateRule):
"""Update rule for Alex Graves's RMSprop.
See :class:`~chainer.optimizers.RMSpropGraves` for the default values of
the hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
alpha (float): Exponential decay rate of the first and second order
moments of the raw gradient.
momentum (float): Exponential decay rate of the first order moment of
the adjusted gradient.
eps (float): Small value for the numerical stability.
"""
is_elementwise = True
_kernel = None
def __init__(self, parent_hyperparam=None,
lr=None, alpha=None, momentum=None, eps=None):
super(RMSpropGravesRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
if alpha is not None:
self.hyperparam.alpha = alpha
if momentum is not None:
self.hyperparam.momentum = momentum
if eps is not None:
self.hyperparam.eps = eps
def init_state(self, param):
with chainer.using_device(param.device):
xp = param.device.xp
self.state['n'] = xp.zeros_like(param.data)
self.state['g'] = xp.zeros_like(param.data)
self.state['delta'] = xp.zeros_like(param.data)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
n, g, delta = self.state['n'], self.state['g'], self.state['delta']
hp = self.hyperparam
n *= hp.alpha
n += (1 - hp.alpha) * grad * grad
g *= hp.alpha
g += (1 - hp.alpha) * grad
delta *= hp.momentum
delta -= hp.lr * grad / numpy.sqrt(n - g * g + hp.eps)
param.data += delta
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
if RMSpropGravesRule._kernel is None:
RMSpropGravesRule._kernel = cuda.elementwise(
'T grad, T lr, T alpha, T momentum, T eps',
'T param, T avg_n, T avg_g, T delta',
'''avg_n = alpha * avg_n + (1 - alpha) * grad * grad;
avg_g = alpha * avg_g + (1 - alpha) * grad;
delta = delta * momentum -
lr * grad * rsqrt(avg_n - avg_g * avg_g + eps);
param += delta;''',
'rmsprop_graves')
RMSpropGravesRule._kernel(
grad, hp.lr, hp.alpha, hp.momentum, hp.eps, param.data,
self.state['n'], self.state['g'], self.state['delta'])
class RMSpropGraves(optimizer.GradientMethod):
"""Alex Graves's RMSprop.
See: https://arxiv.org/abs/1308.0850
Args:
lr (float): Learning rate.
alpha (float): Exponential decay rate of the first and second order
moments of the raw gradient.
momentum (float): Exponential decay rate of the first order moment of
the adjusted gradient.
eps (float): Small value for the numerical stability.
"""
def __init__(self, lr=_default_hyperparam.lr,
alpha=_default_hyperparam.alpha,
momentum=_default_hyperparam.momentum,
eps=_default_hyperparam.eps):
super(RMSpropGraves, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.alpha = alpha
self.hyperparam.momentum = momentum
self.hyperparam.eps = eps
lr = optimizer.HyperparameterProxy('lr')
alpha = optimizer.HyperparameterProxy('alpha')
momentum = optimizer.HyperparameterProxy('momentum')
eps = optimizer.HyperparameterProxy('eps')
def create_update_rule(self):
return RMSpropGravesRule(self.hyperparam)
| 4,709
| 32.642857
| 92
|
py
|
chainer
|
chainer-master/chainer/optimizers/rmsprop.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class RMSpropHyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of RMSprop.
This is only for PEP 544 compliant static type checkers.
"""
lr = None # type: float
alpha = None # type: float
eps = None # type: float
eps_inside_sqrt = None # type: bool
_default_hyperparam = optimizer.Hyperparameter() # type: RMSpropHyperparameter # NOQA
_default_hyperparam.lr = 0.01
_default_hyperparam.alpha = 0.99
_default_hyperparam.eps = 1e-8
_default_hyperparam.eps_inside_sqrt = False
class RMSpropRule(optimizer.UpdateRule):
"""Update rule for RMSprop.
See :class:`~chainer.optimizers.RMSprop` for the default values of the
hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
alpha (float): Exponential decay rate of the second order moment.
eps (float): Small value for the numerical stability.
eps_inside_sqrt (bool): When ``True``, gradient will be divided by
:math:`\\sqrt{ms + eps}` where ``ms`` is the mean square. When
``False`` (default), gradient will be divided by
:math:`\\sqrt{ms} + eps` instead.
This option may be convenient for users porting code from other
frameworks;
see `#4754 <https://github.com/chainer/chainer/issues/4754>`__ for
details.
"""
is_elementwise = True
def __init__(self, parent_hyperparam=None, lr=None, alpha=None, eps=None,
eps_inside_sqrt=None):
super(RMSpropRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
if alpha is not None:
self.hyperparam.alpha = alpha
if eps is not None:
self.hyperparam.eps = eps
if eps_inside_sqrt is not None:
self.hyperparam.eps_inside_sqrt = eps_inside_sqrt
def init_state(self, param):
with chainer.using_device(param.device):
self.state['ms'] = param.device.xp.zeros_like(param.data)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
eps = grad.dtype.type(hp.eps)
if hp.eps != 0 and eps == 0:
raise ValueError(
'eps of RMSprop optimizer is too small for {} ({})'.format(
grad.dtype.name, hp.eps))
ms = self.state['ms']
ms *= hp.alpha
ms += (1 - hp.alpha) * grad * grad
if hp.eps_inside_sqrt:
denom = numpy.sqrt(ms + eps)
else:
denom = numpy.sqrt(ms) + eps
param.data -= hp.lr * grad / denom
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
hp = self.hyperparam
eps = grad.dtype.type(hp.eps)
if eps == 0:
raise ValueError(
'eps of RMSprop optimizer is too small for {} ({})'.format(
grad.dtype.name, hp.eps))
if hp.eps_inside_sqrt:
denom = 'sqrt(ms + eps)'
else:
denom = 'sqrt(ms) + eps'
kernel = cuda.elementwise(
'T grad, T lr, T alpha, T eps',
'T param, T ms',
'''ms = alpha * ms + (1 - alpha) * grad * grad;
param -= lr * grad / ({});'''.format(denom),
'rmsprop')
kernel(grad, self.hyperparam.lr, self.hyperparam.alpha,
eps, param.data, self.state['ms'])
class RMSprop(optimizer.GradientMethod):
"""RMSprop optimizer.
See: T. Tieleman and G. Hinton (2012). Lecture 6.5 - rmsprop, COURSERA:
Neural Networks for Machine Learning.
Args:
lr (float): Learning rate.
alpha (float): Exponential decay rate of the second order moment.
eps (float): Small value for the numerical stability.
eps_inside_sqrt (bool): When ``True``, gradient will be divided by
:math:`\\sqrt{ms + eps}` where ``ms`` is the mean square. When
``False`` (default), gradient will be divided by
:math:`\\sqrt{ms} + eps` instead.
This option may be convenient for users porting code from other
frameworks;
see `#4754 <https://github.com/chainer/chainer/issues/4754>`__ for
details.
"""
def __init__(self, lr=_default_hyperparam.lr,
alpha=_default_hyperparam.alpha, eps=_default_hyperparam.eps,
eps_inside_sqrt=_default_hyperparam.eps_inside_sqrt):
super(RMSprop, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.alpha = alpha
self.hyperparam.eps = eps
self.hyperparam.eps_inside_sqrt = eps_inside_sqrt
lr = optimizer.HyperparameterProxy('lr')
alpha = optimizer.HyperparameterProxy('alpha')
eps = optimizer.HyperparameterProxy('eps')
eps_inside_sqrt = optimizer.HyperparameterProxy('eps_inside_sqrt')
def create_update_rule(self):
return RMSpropRule(self.hyperparam)
| 5,381
| 33.948052
| 86
|
py
|
chainer
|
chainer-master/chainer/optimizers/nesterov_ag.py
|
import chainer
from chainer.backends import cuda
from chainer import optimizer
from chainer import types
if types.TYPE_CHECKING:
import typing_extensions as tpe
class NesterovAGHyperparameter(tpe.Protocol):
"""Protocol class for hyperparameter of Nesterov's Accelerated Gradient.
This is only for PEP 544 compliant static type checkers.
"""
lr = None # type: float
momentum = None # type: float
_default_hyperparam = optimizer.Hyperparameter() # type: NesterovAGHyperparameter # NOQA
_default_hyperparam.lr = 0.01
_default_hyperparam.momentum = 0.9
class NesterovAGRule(optimizer.UpdateRule):
"""Update rule for Nesterov's Accelerated Gradient.
See :class:`~chainer.optimizers.NesterovAG` for the default values of the
hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
momentum (float): Exponential decay rate of the first order moment.
"""
is_elementwise = True
_kernel = None
def __init__(self, parent_hyperparam=None, lr=None, momentum=None):
super(NesterovAGRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
if momentum is not None:
self.hyperparam.momentum = momentum
def init_state(self, param):
with chainer.using_device(param.device):
xp = param.device.xp
self.state['v'] = xp.zeros_like(param.data)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
v = self.state['v']
lr, momentum = self.hyperparam.lr, self.hyperparam.momentum
v *= momentum
v -= lr * grad
param.data += momentum * momentum * v
param.data -= (1 + momentum) * lr * grad
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
if NesterovAGRule._kernel is None:
NesterovAGRule._kernel = cuda.elementwise(
'T grad, T lr, T momentum',
'T param, T v',
'''
v = v * momentum - lr * grad;
param += momentum * momentum * v - (1 + momentum) * lr * grad;
''',
'nesterov_ag')
NesterovAGRule._kernel(
grad, self.hyperparam.lr, self.hyperparam.momentum,
param.data, self.state['v'])
class NesterovAG(optimizer.GradientMethod):
"""Nesterov's Accelerated Gradient.
See: https://arxiv.org/abs/1212.0901
Args:
lr (float): Learning rate.
momentum (float): Exponential decay rate of the first order moment.
"""
def __init__(self, lr=_default_hyperparam.lr,
momentum=_default_hyperparam.momentum):
super(NesterovAG, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.momentum = momentum
lr = optimizer.HyperparameterProxy('lr')
momentum = optimizer.HyperparameterProxy('momentum')
def create_update_rule(self):
return NesterovAGRule(self.hyperparam)
| 3,234
| 28.953704
| 89
|
py
|
chainer
|
chainer-master/chainer/function_hooks/cuda_profile.py
|
from chainer.backends import cuda
from chainer import function_hook
class CUDAProfileHook(function_hook.FunctionHook):
name = 'CUDAProfileHook'
def __init__(self):
cuda.check_cuda_available()
if not cuda.cupy.cuda.nvtx_enabled:
raise RuntimeError('nvtx is required for CUDAProfileHook')
def forward_preprocess(self, function, in_data):
cuda.cupy.cuda.nvtx.RangePush(function.label + '.forward')
def forward_postprocess(self, function, in_data):
cuda.cupy.cuda.nvtx.RangePop()
def backward_preprocess(self, function, in_data, out_grad):
cuda.cupy.cuda.nvtx.RangePush(function.label + '.backward')
def backward_postprocess(self, function, in_data, out_grad):
cuda.cupy.cuda.nvtx.RangePop()
| 780
| 30.24
| 70
|
py
|
chainer
|
chainer-master/chainer/function_hooks/timer.py
|
import os
import sys
import time
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import function_hook
# Select the best-resolution timer function
try:
_get_time = time.perf_counter
except AttributeError:
if os.name == 'nt':
_get_time = time.clock
else:
_get_time = time.time
class TimerHook(function_hook.FunctionHook):
"""Function hook for measuring elapsed time of functions.
Example:
Code example::
from chainer.function_hooks import TimerHook
hook = TimerHook()
with hook:
trainer.run()
hook.print_report()
Output example::
FunctionName ElapsedTime Occurrence
LinearFunction 1.24sec 3900
ReLU 0.59sec 2600
SoftmaxCrossEntropy 0.82sec 1300
Accuracy 0.18sec 700
where *FunctionName* is the name of function that calls the hook,
and *ElapsedTime* is the elapsed time the function consumed,
and *Occurrence* is the number of calls.
Attributes:
call_history: List of measurement results. It consists of pairs of
the name of the function that calls this hook and the elapsed time
the function consumes.
"""
name = 'TimerHook'
table = {'sec': 1, 'ms': 10 ** 3, 'us': 10 ** 6, 'ns': 10 ** 9}
def __init__(self):
self.call_history = []
self._running_stack = []
self._depth = 0
self._total_time = 0
def _preprocess(self):
if self.xp == numpy:
start = _get_time()
self._running_stack.append(start)
else:
start = cuda.Event()
stop = cuda.Event()
start.record()
self._running_stack.append((start, stop))
self._depth += 1
def forward_preprocess(self, function, in_data):
self.xp = backend.get_array_module(*in_data)
self._preprocess()
def backward_preprocess(self, function, in_data, out_grad):
self.xp = backend.get_array_module(*(in_data + out_grad))
self._preprocess()
def _postprocess(self, function):
if self.xp == numpy:
start = self._running_stack.pop()
stop = _get_time()
elapsed_time = stop - start
else:
start, stop = self._running_stack.pop()
stop.record()
stop.synchronize()
# Note that `get_elapsed_time` returns result in milliseconds
elapsed_time = cuda.cupy.cuda.get_elapsed_time(
start, stop) / 1000
self.call_history.append((function._impl_name, elapsed_time))
assert self._depth > 0
self._depth -= 1
if self._depth == 0:
self._total_time += elapsed_time
def forward_postprocess(self, function, in_data):
xp = backend.get_array_module(*in_data)
assert xp == self.xp
self._postprocess(function)
def backward_postprocess(self, function, in_data, out_grad):
xp = backend.get_array_module(*(in_data + out_grad))
assert xp == self.xp
self._postprocess(function)
def total_time(self):
"""Returns total elapsed time in seconds."""
return self._total_time
def summary(self):
"""Returns a summary of time profiling in functions.
Returns:
A summarized dictionary whose keys are function names and
values are dictionaries of `elapsed_time` and `occurrence`.
"""
summary = {}
for function_name, elapsed_time in self.call_history:
if function_name not in summary:
summary[function_name] = {'elapsed_time': 0, 'occurrence': 0}
record = summary[function_name]
record['elapsed_time'] += elapsed_time
record['occurrence'] += 1
return summary
def _choose_unit(self, second):
"""Choose optimal unit."""
factor = 1
for unit in ['sec', 'ms', 'us']:
if second * factor >= 1:
return factor, unit
factor *= 1000.0
return factor, 'ns'
def print_report(self, unit='auto', file=sys.stdout):
"""Prints a summary report of time profiling in functions.
Args:
unit (str): Supplementary units used for computational times.
`sec`, `ms`, `us`, `ns`, `auto`(default) and `auto_foreach`
are supported. If `auto`, units of times are aligned to the
largest, and if `auto_foreach`, units of times are adjusted for
each element.
"""
entries = [['FunctionName', 'ElapsedTime', 'Occurrence']]
auto_foreach = (unit == 'auto_foreach')
if unit == 'auto':
max_time = max(
record['elapsed_time'] for record in self.summary().values())
factor, unit = self._choose_unit(max_time)
elif unit != 'auto_foreach':
factor = self.table[unit]
for function_name, record in self.summary().items():
second = record['elapsed_time']
if auto_foreach:
factor, unit = self._choose_unit(second)
elapsed_time = '%3.2f%s' % (second * factor, unit)
occurrence = str(record['occurrence'])
entries.append([function_name, elapsed_time, occurrence])
entry_widths = []
entry_widths.append(max(len(f) for f, _, _ in entries))
entry_widths.append(max(len(e) for _, e, _ in entries))
entry_widths.append(max(len(o) for _, _, o in entries))
template = ' '.join('{:>%d}' % w for w in entry_widths)
for function_name, elapsed_time, occurrence in entries:
line = template.format(function_name, elapsed_time, occurrence)
file.write(line)
file.write('\n')
if hasattr(file, 'flush'):
file.flush()
| 6,045
| 33.947977
| 79
|
py
|
chainer
|
chainer-master/chainer/function_hooks/cupy_memory_profile.py
|
import collections
import sys
import typing as tp # NOQA
from chainer.backends import cuda
from chainer import function_hook
try:
MemoryHook = cuda.cupy.cuda.memory_hook.MemoryHook # type: tp.Any # to handle https://github.com/python/mypy/issues/2477 # NOQA
memory_hook_available = True
except Exception as e:
_resolution_error = e
MemoryHook = object
memory_hook_available = False
class CupyMemoryProfileHook(function_hook.FunctionHook):
"""Function hook for measuring memory usage of functions in cupy memory pool.
Example:
Code example::
from chainer.function_hooks import CupyMemoryProfileHook
hook = CupyMemoryProfileHook()
with hook:
trainer.run()
hook.print_report()
Output example::
FunctionName UsedBytes AcquiredBytes Occurrence
LinearFunction 5.16GB 179.98MB 3900
ReLU 0.99GB 458.97MB 2600
SoftmaxCrossEntropy 0.01GB 5.08MB 1300
Accuracy 0.00GB 0.35MB 700
where *FunctionName* is the name of function that calls the hook, and
*UsedBytes* is the memory bytes the function used from cupy memory
pool, and *AcquiredBytes* is the actual memory bytes the cupy memory
pool acquired from GPU device on the function call, and *Occurrence*
is the number of calls.
Attributes:
call_history: List of measurement results. It consists of the name of
the function that calls this hook, the memory bytes the function
used from cupy memory pool, and the memory bytes the cupy memory
pool acquired from GPU device on the function call.
"""
name = 'CupyMemoryProfileHook'
_units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB']
_table = {u: 1024.0 ** i for i, u in enumerate(_units)}
def __init__(self):
cuda.check_cuda_available()
if not memory_hook_available:
msg = 'CuPy >= 2.0 is required. %s' % str(_resolution_error)
raise RuntimeError(msg)
self.call_history = []
self._memory_hook = CupyMemoryCumulativeHook()
self._running_stack = []
self._total_used_bytes = 0
self._total_acquired_bytes = 0
def added(self, function=None):
self._memory_hook.__enter__()
def deleted(self, function=None):
self._memory_hook.__exit__()
def _preprocess(self):
start_used_bytes = self._memory_hook.used_bytes
start_acquired_bytes = self._memory_hook.acquired_bytes
self._running_stack.append((start_used_bytes, start_acquired_bytes))
def forward_preprocess(self, function, in_data):
self._preprocess()
def backward_preprocess(self, function, in_data, out_grad):
self._preprocess()
def _postprocess(self, function):
start_used_bytes, start_acquired_bytes = self._running_stack.pop()
end_used_bytes = self._memory_hook.used_bytes
end_acquired_bytes = self._memory_hook.acquired_bytes
used_bytes = end_used_bytes - start_used_bytes
acquired_bytes = end_acquired_bytes - start_acquired_bytes
depth = len(self._running_stack)
self.call_history.append(
(function._impl_name, used_bytes, acquired_bytes, depth))
if depth == 0:
self._total_used_bytes += used_bytes
self._total_acquired_bytes += acquired_bytes
def forward_postprocess(self, function, in_data):
self._postprocess(function)
def backward_postprocess(self, function, in_data, out_grad):
self._postprocess(function)
def total_used_bytes(self):
"""Returns total bytes that functions used from cupy memory pool."""
return self._total_used_bytes
def total_acquired_bytes(self):
"""Returns total bytes that cupy memory pool acquired from GPU."""
return self._total_acquired_bytes
def summary(self):
"""Returns a summary of memory profiling in functions.
Returns:
A summarized dictionary whose keys are function names and
values are dictionaries of
``used_bytes``, ``acquired_bytes``, and ``occurrrence``.
"""
# TODO(sonots): PROBLEM: takes count of nested functions duplicately
summary = collections.OrderedDict()
for func_name, used_bytes, acquired_bytes, depth in self.call_history:
if func_name not in summary:
summary[func_name] = {'used_bytes': 0,
'acquired_bytes': 0, 'occurrence': 0}
record = summary[func_name]
record['used_bytes'] += used_bytes
record['acquired_bytes'] += acquired_bytes
record['occurrence'] += 1
return summary
def _choose_unit(self, size):
"""Choose optimal unit.
Returns:
Tuple of denomi (float) and human-readable unit (str).
"""
denomi = 1.0
if size <= 0:
return denomi, self._units[0]
for unit in self._units[:-1]:
if size / (denomi * 1024) < 1:
return denomi, unit
denomi *= 1024
return denomi, self._units[-1]
def print_report(self, unit='auto', file=sys.stdout):
"""Prints a summary report of memory profiling in functions.
Args:
unit (str): Supplementary units used for used memories.
`B`, `KB`, `MB`, `GB`, `TB`, `PB`, `EB`, `ZB`, `auto`(default)
and `auto_foreach` are supported. If `auto`, units of memories
are aligned to the largest values of 'used_bytes' and
'acquired_bytes'. If `auto_foreach`, units of memories are
adjusted for each element.
"""
entries = [[
'FunctionName', 'UsedBytes', 'AcquiredBytes', 'Occurrence']]
if unit == 'auto':
max_used = max(
record['used_bytes'] for record in self.summary().values())
max_acquired = max(
record['acquired_bytes'] for record in self.summary().values())
denomi_used, unit_used = self._choose_unit(max_used)
denomi_acquired, unit_acquired = self._choose_unit(max_acquired)
elif unit != 'auto_foreach':
denomi_used = denomi_acquired = self._table[unit]
unit_used = unit_acquired = unit
for function_name, record in self.summary().items():
used_bytes = record['used_bytes']
acquired_bytes = record['acquired_bytes']
if unit == 'auto_foreach':
denomi_used, unit_used = self._choose_unit(used_bytes)
denomi_acquired, unit_acquired = self._choose_unit(
acquired_bytes)
used_bytes = '%3.2f%s' % (used_bytes / denomi_used, unit_used)
acquired_bytes = '%3.2f%s' % (
acquired_bytes / denomi_acquired, unit_acquired)
occurrence = str(record['occurrence'])
entries.append(
[function_name, used_bytes, acquired_bytes, occurrence])
entry_widths = []
entry_widths.append(max(len(f) for f, _, _, _ in entries))
entry_widths.append(max(len(u) for _, u, _, _ in entries))
entry_widths.append(max(len(a) for _, _, a, _ in entries))
entry_widths.append(max(len(o) for _, _, _, o in entries))
template = ' '.join('{:>%d}' % w for w in entry_widths)
for function_name, used_bytes, acquired_bytes, occurrence in entries:
line = template.format(
function_name, used_bytes, acquired_bytes, occurrence)
file.write(line)
file.write('\n')
if hasattr(file, 'flush'):
file.flush()
class CupyMemoryCumulativeHook(MemoryHook):
"""A simple memory hook for cupy measuring memory usage cumulatively.
Attributes:
used_bytes (int): cumulative bytes that application used from cupy
memory pool.
acquired_bytes (int): cumulative bytes that cupy memory pool acquired
from GPU device.
"""
name = 'CupyMemoryCumulativeHook'
def __init__(self):
self.used_bytes = 0
self.acquired_bytes = 0
def alloc_preprocess(self, **kwargs):
self.acquired_bytes += kwargs['mem_size']
def malloc_preprocess(self, **kwargs):
self.used_bytes += kwargs['mem_size']
| 8,573
| 38.694444
| 132
|
py
|
chainer
|
chainer-master/chainer/function_hooks/debug_print.py
|
import sys
import warnings
from chainer import backend
from chainer import function_hook
from chainer import variable
class PrintHook(function_hook.FunctionHook):
"""Function hook that prints debug information.
This function hook outputs the debug information of input arguments of
``forward`` and ``backward`` methods involved in the hooked functions
at preprocessing time (that is, just before each method is called).
Unlike simple "debug print" technique, where users insert print functions
at every function to be inspected, we can show the information
of all functions involved with single ``with`` statement.
Further, this hook enables us to show the information of
``backward`` methods without inserting print functions into
Chainer's library code.
Args:
sep: *(deprecated since v4.0.0)* Ignored.
end: Character to be added at the end of print function.
file: Output file_like object that that redirect to.
flush: If ``True``, this hook forcibly flushes the text stream
at the end of preprocessing.
.. admonition:: Example
The basic usage is to use it with ``with`` statement.
>>> from chainer import function_hooks
>>> l = L.Linear(10, 10)
>>> x = chainer.Variable(np.zeros((1, 10), np.float32))
>>> with chainer.function_hooks.PrintHook():
... y = l(x)
... z = F.sum(y)
... z.backward() # doctest:+SKIP
In this example, ``PrintHook`` shows the debug information of
forward propagation of ``LinearFunction`` (which is implicitly
called by ``l``) and ``Sum`` (called by ``F.sum``)
and backward propagation of ``z`` and ``y``.
"""
name = 'PrintHook'
def __init__(self, sep=None, end='\n', file=sys.stdout, flush=True):
if sep is not None:
warnings.warn('sep argument in chainer.function_hooks.PrintHook '
'is deprecated.', DeprecationWarning)
self.sep = sep # Keep sep because it was originally documented
self.end = end
self.file = file
self.flush = flush
def _print(self, msg):
self.file.write(msg + self.end)
def _process(self, function, in_data, out_grad=None):
self._print('function\t{}'.format(function.label))
self._print('input data')
for d in in_data:
if d is None:
# Some inputs can be removed with `retain_grad`.
self._print('(removed)')
continue
self._print(variable.Variable(d).debug_print())
if out_grad is not None:
self._print('output gradient')
for d in out_grad:
if d is None:
v = variable.Variable()
else:
xp = backend.get_array_module(d)
v = variable.Variable(xp.zeros_like(d, dtype=d.dtype))
v.grad = d
self._print(v.debug_print())
if self.flush and hasattr(self.file, 'flush'):
self.file.flush()
def forward_preprocess(self, function, in_data):
self._process(function, in_data)
def backward_preprocess(self, function, in_data, out_grad):
self._process(function, in_data, out_grad)
| 3,334
| 35.648352
| 77
|
py
|
chainer
|
chainer-master/chainer/function_hooks/__init__.py
|
# import classes and functions
from chainer.function_hooks.cuda_profile import CUDAProfileHook # NOQA
from chainer.function_hooks.cupy_memory_profile import CupyMemoryProfileHook # NOQA
from chainer.function_hooks.debug_print import PrintHook # NOQA
from chainer.function_hooks.timer import TimerHook # NOQA
| 312
| 51.166667
| 84
|
py
|
chainer
|
chainer-master/chainer/utils/error.py
|
def _format_array_props(arrays):
# Formats array shapes and dtypes for error messages.
assert isinstance(arrays, (list, tuple))
return ', '.join([
None if arr is None
else '{}:{}'.format(arr.shape, arr.dtype.name)
for arr in arrays])
| 271
| 29.222222
| 57
|
py
|
chainer
|
chainer-master/chainer/utils/type_check.py
|
import contextlib
import functools
import operator
import sys
import threading
import numpy
import six
import chainer
from chainer.backends import cuda
_thread_local = threading.local()
@contextlib.contextmanager
def get_function_check_context(f):
try:
default = _thread_local.current_function
except AttributeError:
default = None
_thread_local.current_function = f
try:
yield
finally:
_thread_local.current_function = default
class TypeInfo(object):
"""Type information of an input/gradient array.
It contains type information of an array, such as the shape of array and
the number of dimensions.
This information is independent of CPU or GPU array.
"""
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
self.ndim = len(shape)
@property
def size(self):
return functools.reduce(operator.mul, self.shape, 1)
class TypeInfoTuple(tuple):
"""Type information of input/gradient tuples.
It is a sub-class of tuple containing :class:`TypeInfo`. The i-th element
of this object contains type information of the i-th input/gradient data.
As each element is :class:`Expr`, you can easily check its validity.
"""
def size(self):
"""Returns an expression representing its length.
Returns:
Expr: An expression object representing length of the tuple.
"""
return Variable(len(self), '{0}.size'.format(self.name))
class LightTypeInfoTuple(tuple):
"""Type information of input/gradient tuples for light-weight check.
It is a sub-class of tuple containing :class:`TypeInfo`. The i-th element
of this object contains type information of the i-th input/gradient data.
"""
def size(self):
"""Returns its length.
Returns:
int: Length of the tuple.
"""
return len(self)
def get_types(data, name, accept_none, *, shapes=None):
assert isinstance(data, tuple)
if shapes is None:
shapes = tuple([x.shape for x in data])
info = TypeInfoTuple(
_get_type(name, i, x, accept_none, shape)
for i, (x, shape) in enumerate(zip(data, shapes)))
# I don't know a method to set an attribute in an initializer of tuple.
info.name = name
return info
def get_light_types(data, *, shapes=None):
assert(isinstance(data, tuple))
if shapes is None:
data_ = data
else:
# For non-default memory format (e.g. NHWC), shapes of data are
# different from the semantic shapes (e.g. NCHW). In such cases
# semantic shapes are explicitly given as `shapes` argument.
# If it is given, TypeInfos with modified shapes are wrapped.
data_ = tuple([
TypeInfo(shape, x.dtype) for x, shape in zip(data, shapes)])
return LightTypeInfoTuple(data_)
def _get_type(name, index, array, accept_none, shape):
var = '{0}[{1}]'.format(name, index)
if accept_none and array is None:
# case that gradient is not given
return Variable(TypeInfo((), None), var)
assert isinstance(array, chainer.get_array_types())
return Variable(TypeInfo(shape, array.dtype), var)
def _make_un_operator(exp, priority, func):
def f(x):
return UnaryOperator(priority, x, exp, func)
return f
def _make_bin_operator(exp, priority, func, right_associative=False):
def f(x, y):
return BinaryOperator(priority, x, y, exp, func, right_associative)
return f
def _make_bool_operator(exp, inv, func):
def f(x, y):
return BoolBinaryOperator(x, y, exp, inv, func)
return f
def _flip(f):
return lambda x, y: f(y, x)
class Expr(object):
"""Abstract syntax tree of an expression.
It represents an abstract syntax tree, and isn't a value. You can get its
actual value with :meth:`eval` function, and get syntax representation with
the :meth:`__str__` method.
Each comparison operator (e.g. ``==``) generates a new :class:`Expr` object
which represents the result of comparison between two expressions.
.. admonition:: Example
Let ``x`` and ``y`` be instances of :class:`Expr`, then ::
>>> x = Variable(1, 'x')
>>> y = Variable(1, 'y')
>>> c = (x == y)
is also an instance of :class:`Expr`. To evaluate and get its value,
call :meth:`eval` method::
>>> c.eval()
True
Call ``str`` function to get a representation of the original
equation::
>>> str(c)
'x == y'
You can actually compare an expression with a value::
>>> (x == 1).eval()
True
Note that you can't use boolean operators such as ``and``, as they try
to cast expressions to boolean values::
>>> z = Variable(1, 'z')
>>> x == y and y == z # raises an error
Traceback (most recent call last):
RuntimeError: Don't convert Expr to bool. Please call Expr.eval \
method to evaluate expression.
"""
def __init__(self, priority):
self.priority = priority
def eval(self):
"""Evaluates the tree to get actual value.
Behavior of this function depends on an implementation class.
For example, a binary operator ``+`` calls the ``__add__`` function
with the two results of :meth:`eval` function.
"""
raise NotImplementedError()
def __getattr__(self, name):
return GetAttr(self, name)
def __getitem__(self, key):
return GetItem(self, key)
def __call__(self, *args):
return Call(self, args)
def __nonzero__(self):
# When a user calls a boolean operator like `(x == y and z == w)`,
# `and` operator evaluate the first expression.
# If it returns `True` (and it's default behavior), the `and` operator
# returns *the second expression*, not a boolean value.
# So, `(x == y and z == w)` returns the result of `z == w`, and
# `(x == y and z == w).expect()` raise no errors but only checks
# `z == w`. It is confusing.
# See also:
# https://docs.python.org/3/library/stdtypes.html
msg = ('An Expr instance cannot be evaluated as bool. '
'Please use chainer.utils.type_check.eval() to evaluate an '
'expression.')
raise RuntimeError(msg)
def __bool__(self):
self.__nonzero__()
__eq__ = _make_bool_operator('==', '!=', operator.__eq__)
__ne__ = _make_bool_operator('!=', '==', operator.__ne__)
__lt__ = _make_bool_operator('<', '>=', operator.__lt__)
__le__ = _make_bool_operator('<=', '>', operator.__le__)
__gt__ = _make_bool_operator('>', '<=', operator.__gt__)
__ge__ = _make_bool_operator('>=', '<', operator.__ge__)
# Please refer the Python documentation to know priority of operators.
# https://docs.python.org/3/reference/expressions.html
__add__ = _make_bin_operator('+', 4, operator.__add__)
__radd__ = _flip(__add__)
__sub__ = _make_bin_operator('-', 4, operator.__sub__)
__rsub__ = _flip(__sub__)
__mul__ = _make_bin_operator('*', 5, operator.__mul__)
__rmul__ = _flip(__mul__)
if sys.version_info < (3, 0, 0):
__div__ = _make_bin_operator('/', 5, operator.__div__) # type: ignore # NOQA
__rdiv__ = _flip(__div__)
else:
__truediv__ = _make_bin_operator('/', 5, operator.__truediv__)
__rtruediv__ = _flip(__truediv__)
__floordiv__ = _make_bin_operator('//', 5, operator.__floordiv__)
__rfloordiv__ = _flip(__floordiv__)
__mod__ = _make_bin_operator('%', 5, operator.__mod__)
__rmod__ = _flip(__mod__)
# Only '**' operator is right-associative
__pow__ = _make_bin_operator('**', 7, operator.__mod__,
right_associative=True)
__lshift__ = _make_bin_operator('<<', 3, operator.__lshift__)
__rlshift__ = _flip(__lshift__)
__rshift__ = _make_bin_operator('>>', 3, operator.__rshift__)
__rrshift__ = _flip(__rshift__)
__and__ = _make_bin_operator('&', 2, operator.__and__)
__rand__ = _flip(__and__)
__xor__ = _make_bin_operator('^', 1, operator.__xor__)
__rxor__ = _flip(__xor__)
__or__ = _make_bin_operator('|', 0, operator.__or__)
__ror__ = _flip(__or__)
__neg__ = _make_un_operator('-', 6, operator.__neg__)
__pos__ = _make_un_operator('+', 6, operator.__pos__)
__invert__ = _make_un_operator('~', 6, operator.__invert__)
def _eval_expr(v):
if isinstance(v, Expr):
return v.eval()
elif isinstance(v, list):
return list(map(_eval_expr, v))
elif isinstance(v, tuple):
return tuple(map(_eval_expr, v))
else:
return v
def _repr(v):
if isinstance(v, Expr):
return str(v)
elif isinstance(v, list):
return '[{0}]'.format(', '.join(map(_repr, v)))
elif isinstance(v, tuple):
if len(v) == 0:
return '()'
elif len(v) == 1:
return '({0},)'.format(_repr(v[0]))
else:
return '({0})'.format(', '.join(map(_repr, v)))
else:
return repr(v)
class Atom(Expr):
def __init__(self):
super(Atom, self).__init__(8)
class Constant(Atom):
def __init__(self, value):
super(Constant, self).__init__()
self.value = value
def __str__(self):
return _repr(self.value)
def eval(self):
return self.value
class Variable(Atom):
def __init__(self, value, name):
super(Variable, self).__init__()
self.value = value
self.name = name
def __str__(self):
return self.name
def eval(self):
return self.value
class GetAttr(Atom):
def __init__(self, obj, name):
super(GetAttr, self).__init__()
self.obj = obj
self.name = name
def __str__(self):
if isinstance(self.name, str):
return '{0}.{1}'.format(_repr(self.obj), self.name)
elif (isinstance(self.name, Constant) and
isinstance(self.name.value, str)):
return '{0}.{1}'.format(_repr(self.obj), self.name.value)
else:
return 'getattr({0}, {1})'.format(_repr(self.obj),
_repr(self.name))
def eval(self):
return getattr(_eval_expr(self.obj), _eval_expr(self.name))
def _str_subscript(exp):
if exp is Ellipsis:
return '...'
elif isinstance(exp, slice):
def key_str(v):
return '' if v is None else _repr(v)
if exp.step is None:
return '{0}:{1}'.format(key_str(exp.start),
key_str(exp.stop))
else:
return '{0}:{1}:{2}'.format(key_str(exp.start),
key_str(exp.stop),
key_str(exp.step))
elif isinstance(exp, tuple):
return ', '.join(map(_str_subscript, exp))
else:
return _repr(exp)
class GetItem(Atom):
def __init__(self, obj, key):
super(GetItem, self).__init__()
self.obj = obj
self.key = key
def __str__(self):
key = _str_subscript(self.key)
return '{0}[{1}]'.format(_repr(self.obj), key)
def eval(self):
return _eval_expr(self.obj)[_eval_expr(self.key)]
class Call(Atom):
def __init__(self, obj, args):
assert isinstance(args, tuple)
super(Call, self).__init__()
self.obj = obj
self.args = args
def __str__(self):
return '{0}({1})'.format(_repr(self.obj),
', '.join(map(_repr, self.args)))
def eval(self):
args = map(_eval_expr, self.args)
func = _eval_expr(self.obj)
return func(*args)
class UnaryOperator(Expr):
def __init__(self, priority, term, exp, func):
super(UnaryOperator, self).__init__(priority)
self.term = term
self.exp = exp
self.func = func
def eval(self):
return self.func(_eval_expr(self.term))
def __str__(self):
exp = _repr(self.term)
if isinstance(self.term, Expr) and self.term.priority < self.priority:
exp = '(' + exp + ')'
return self.exp + exp
class BinaryOperator(Expr):
def __init__(self, priority, lhs, rhs, exp, func, right_associative=False):
super(BinaryOperator, self).__init__(priority)
self.lhs = lhs
self.rhs = rhs
self.exp = exp
self.func = func
self.right_associative = right_associative
def eval(self):
left = self._eval_left()
right = self._eval_right()
return self.func(left, right)
def _eval_left(self):
return _eval_expr(self.lhs)
def _eval_right(self):
return _eval_expr(self.rhs)
def __str__(self):
# When an infix operator is left-associative, we need to append parens
# when rhs has the same priority
# e.g. x << (y << z) != x << y << z
left = _repr(self.lhs)
if isinstance(self.lhs, Expr) and (
self.priority > self.lhs.priority or
(self.right_associative and
self.priority == self.lhs.priority)):
left = '(' + left + ')'
right = _repr(self.rhs)
if isinstance(self.rhs, Expr) and (
self.priority > self.rhs.priority or
(not self.right_associative and
self.priority == self.rhs.priority)):
right = '(' + right + ')'
return '{0} {2} {1}'.format(left, right, self.exp)
class Testable(object):
def expect(self):
raise NotImplementedError()
class BoolBinaryOperator(BinaryOperator, Testable):
def __init__(self, lhs, rhs, exp, inv, func):
BinaryOperator.__init__(self, -1, lhs, rhs, exp, func)
self.inv = inv
def expect(self):
left = self._eval_left()
right = self._eval_right()
if not self.func(left, right):
raise InvalidType(
'{0} {1} {2}'.format(self.lhs, self.exp, self.rhs),
'{0} {1} {2}'.format(left, self.inv, right))
class InvalidType(Exception):
"""Raised when types of data for forward/backward are invalid.
"""
def __init__(self, expect, actual, msg=None):
if msg is None:
msg = 'Expect: {0}\nActual: {1}'.format(expect, actual)
if (hasattr(_thread_local, 'current_function')
and _thread_local.current_function is not None):
msg = '''
Invalid operation is performed in: {0} (Forward)
{1}'''.format(_thread_local.current_function.label, msg)
super(InvalidType, self).__init__(msg)
self.expect = expect
self.actual = actual
def __reduce__(self):
msg, = self.args
return (InvalidType, (self.expect, self.actual, msg))
def _argname(in_types, names):
"""Assigns user friendly names for the input types.
This function also asserts that lengths of in_types and names are the
same.
Args:
in_types (tuple of TypeInfoTuple): Tuple of type information to assign
name to.
names (tuple of str): Human-readable names of ``in_types``.
"""
if len(in_types) != len(names):
raise InvalidType(
'{} argument(s)'.format(str(len(names))),
'{} argument(s)'.format(str(len(in_types))),
'Invalid number of arguments')
for in_type, name in zip(in_types, names):
if isinstance(in_type, Variable):
in_type.name = name
def expect(*bool_exprs):
"""Evaluates and tests all given expressions.
This function evaluates given boolean expressions in order. When at least
one expression is evaluated as ``False``, that means the given condition is
not satisfied.
You can check conditions with this function.
Args:
bool_exprs (tuple of Bool expressions): Bool expressions you want to
evaluate.
"""
if in_light_mode():
if not all(bool_exprs):
raise InvalidType('', '')
else:
for expr in bool_exprs:
assert isinstance(expr, Testable)
expr.expect()
def same_types(*arrays):
for x in arrays:
if not isinstance(x, numpy.ndarray):
break
else:
return True
for x in arrays:
if not isinstance(x, cuda.ndarray):
return False
return True
def eval(exp):
if in_light_mode():
return exp
else:
return exp.eval()
def make_variable(value, name):
if in_light_mode():
return value
else:
return Variable(value, name)
def _make_variable_from_array(array, name):
if not isinstance(array, chainer.get_array_types()):
raise InvalidType(
'isinstance({}, ndarray)'.format(name),
'type({}) == {}'.format(name, type(array)),
)
if in_light_mode():
return array
else:
return Variable(TypeInfo(array.shape, array.dtype), name)
class LightMode(object):
def __enter__(self):
_thread_local.light_mode = True
def __exit__(self, exc_type, exc_value, traceback):
_thread_local.light_mode = False
def _prod_impl(xs):
result = 1
for x in xs:
result *= x
return result
_prod = Variable(_prod_impl, 'prod')
light_mode = LightMode()
def in_light_mode():
try:
return _thread_local.light_mode
except AttributeError:
_thread_local.light_mode = False
return False
def prod(xs):
if in_light_mode():
return _prod_impl(xs)
else:
return _prod(xs)
def expect_broadcast_shapes(*shape_types):
"""Checks if shapes can be broadcasted together.
Args:
shapes_types: Type-checked shapes of the arrays to broadcast.
"""
shapes = [eval(s) for s in shape_types]
error = None
try:
# simulate the shape calculation using zero-sized arrays
numpy.broadcast(*[numpy.empty(s + (0,)) for s in shapes])
except ValueError:
msgs = ['cannot broadcast inputs of the following shapes:']
for shape_type, shape in six.moves.zip(shape_types, shapes):
msgs.append('{} = {}'.format(shape_type, shape))
error = InvalidType('', '', msg='\n'.join(msgs))
if error is not None:
raise error
| 18,493
| 27.063733
| 85
|
py
|
chainer
|
chainer-master/chainer/utils/_collections.py
|
import collections
import weakref
import six
if six.PY3:
OrderedDict = collections.OrderedDict
else:
# Reference counting cannot free keys in old `collections.OrderedDict`,
# where a doubly linked list is used to maintain the order.
class OrderedDict(object):
"""Dictionary that remembers insertion order
This class wraps `collections.OrderedDict` to free keys by reference
counting.
"""
def __init__(self):
self.keys = set()
self.dict = collections.OrderedDict()
def __contains__(self, key):
return weakref.ref(key) in self.dict
def __setitem__(self, key, value):
self.keys.add(key)
self.dict[weakref.ref(key)] = value
def __getitem__(self, key):
return self.dict[weakref.ref(key)]
def items(self):
return [(k(), v) for k, v in self.dict.items()]
def values(self):
return self.dict.values()
| 994
| 25.184211
| 76
|
py
|
chainer
|
chainer-master/chainer/utils/nondeterministic.py
|
import warnings
from chainer import configuration
def nondeterministic(f_name):
"""Function to warn non-deterministic functions
If `config.warn_nondeterministic` is True, this function will give a
warning that this functions contains a non-deterministic function, such
as atomicAdd.
"""
if configuration.config.warn_nondeterministic:
warnings.warn(
'Potentially non-deterministic code is being executed while'
' config.warn_nondeterministic set. Source: ' + f_name)
| 526
| 30
| 75
|
py
|
chainer
|
chainer-master/chainer/utils/conv_nd.py
|
import itertools
import numpy
import six
from chainer.backends import cuda
from chainer.utils.conv import get_conv_outsize
from chainer.utils import conv_nd_kernel
def as_tuple(x, n):
if hasattr(x, '__getitem__'):
assert len(x) == n
return tuple(x)
return (x,) * n
def im2col_nd_cpu(img, ksize, stride, pad, pval=0, cover_all=False, dilate=1):
n, c = img.shape[0:2] # (n, c, d_1, d_2, ..., d_N)
dims = img.shape[2:]
ndim = len(dims)
dilate = as_tuple(dilate, ndim)
assert ndim == len(ksize) == len(stride) == len(pad)
outs = tuple(get_conv_outsize(d, k, s, p, cover_all, di)
for (d, k, s, p, di)
in zip(dims, ksize, stride, pad, dilate))
assert all(out > 0 for out in outs), 'Output sizes should be positive.'
# Pad around image.
pad_width = ((0, 0), (0, 0)) + tuple(
(p, p + s - 1) for (s, p) in zip(stride, pad))
img = numpy.pad(img, pad_width, mode='constant', constant_values=(pval,))
# Make patch array with which we will compute correlation with filter.
# shape: (n, c, k_1, k_2, ..., k_N, out_1, out_2, ..., out_N)
shape = (n, c) + ksize + outs
col = numpy.ndarray(shape, dtype=img.dtype)
# Fill the patch array.
colon = slice(None)
for kxs in itertools.product(*[six.moves.range(k) for k in ksize]):
# col[:, :, kx_1, kx_2, ..., kx_N, :, :, ..., :]
col_index = (colon, colon) + kxs + (colon,) * ndim
# img[:, :, kx_1:kx_lim_1:s_1, ..., kx_N:kx_lim_N:s_N]
kx_dilate = tuple(kx * di for (kx, di) in zip(kxs, dilate))
kx_lims = tuple(kx_di + s * out
for (kx_di, s, out) in zip(kx_dilate, stride, outs))
img_index = (colon, colon) + tuple(
slice(kx_di, kx_lim, s)
for (kx_di, kx_lim, s) in zip(kx_dilate, kx_lims, stride))
col[col_index] = img[img_index]
return col
def im2col_nd_gpu(img, ksize, stride, pad, cover_all=False, dilate=1):
n, c = img.shape[0:2] # (n, c, d_1, d_2, ..., d_N)
dims = img.shape[2:]
ndim = len(dims)
dilate = as_tuple(dilate, ndim)
assert ndim == len(ksize) == len(stride) == len(pad)
outs = tuple(get_conv_outsize(d, k, s, p, cover_all, di)
for (d, k, s, p, di)
in zip(dims, ksize, stride, pad, dilate))
assert all(out > 0 for out in outs), 'Output sizes should be positive.'
# col_shape: (n, c, k_1, k_2, ..., k_N, out_1, out_2, ..., out_N)
shape = (n, c) + ksize + outs
col = cuda.cupy.empty(shape, dtype=img.dtype)
in_params, out_params, operation, name = \
conv_nd_kernel.Im2colNDKernel.generate(ndim)
cuda.elementwise(in_params, out_params, operation, name)(
img.reduced_view(),
*(dims + outs + ksize + stride + pad + dilate + (col,)))
return col
def im2col_nd(img, ksize, stride, pad, cover_all=False, dilate=1):
fn = im2col_nd_gpu if isinstance(img, cuda.ndarray) else im2col_nd_cpu
return fn(img, ksize, stride, pad, cover_all=cover_all, dilate=dilate)
def col2im_nd_cpu(col, stride, pad, dims, dilate=1):
n, c = col.shape[:2] # (n, c, kx_1, ..., kx_N, out_1, ..., out_N)
mid = (len(col.shape) - 2) // 2 + 2
ksize = col.shape[2:mid]
outs = col.shape[mid:]
colon = slice(None)
ndim = len(outs)
dilate = as_tuple(dilate, ndim)
assert len(ksize) == len(stride) == len(pad) == len(dims) == ndim
# Image with padded size.
img_shape = (n, c) + tuple(d + 2 * p + s - 1
for (d, p, s) in zip(dims, pad, stride))
img = numpy.zeros(img_shape, dtype=col.dtype)
for kxs in itertools.product(*[six.moves.range(k) for k in ksize]):
# (:, :, kx_1:kx_lim_1:s_1, ..., kx_N:kx_lim_N:s_N)
kx_dilate = tuple(kx * di for (kx, di) in zip(kxs, dilate))
kx_lims = tuple(kx_di + s * out
for (kx_di, s, out) in zip(kx_dilate, stride, outs))
img_index = (colon, colon) + tuple(
slice(kx_di, kx_lim, s)
for (kx_di, kx_lim, s) in zip(kx_dilate, kx_lims, stride))
# (:, :, kx_1, kx_2, ..., kx_N, :, :, ..., :)
col_index = (colon, colon) + kxs + (colon,) * len(outs)
img[img_index] += col[col_index]
# (:, :, p_1:d_1 + p_1, p_2:d_2 + p_2, ..., p_N:d_N + p_N]
img_index = (colon, colon) + tuple(
slice(p, d + p) for (p, d) in zip(pad, dims))
return img[img_index]
def col2im_nd_gpu(col, stride, pad, dims, dilate=1):
n, c = col.shape[:2] # (n, c, k_1, ..., k_N, out_1, ..., out_N)
mid = (len(col.shape) - 2) // 2 + 2
ksize = col.shape[2:mid]
outs = col.shape[mid:]
ndim = len(dims)
dilate = as_tuple(dilate, ndim)
assert len(outs) == len(ksize) == len(stride) == len(pad) == ndim
img_shape = (n, c) + dims # (n, c, d_1, d_2, ..., d_N)
img = cuda.cupy.empty(img_shape, dtype=col.dtype)
in_params, out_params, operation, name = \
conv_nd_kernel.Col2imNDKernel.generate(ndim)
cuda.elementwise(in_params, out_params, operation, name)(
col.reduced_view(),
*(dims + outs + ksize + stride + pad + dilate + (img,)))
return img
def col2im_nd(col, stride, pad, dims, dilate=1):
fn = col2im_nd_gpu if isinstance(col, cuda.ndarray) else col2im_nd_cpu
return fn(col, stride, pad, dims, dilate)
| 5,382
| 36.643357
| 78
|
py
|
chainer
|
chainer-master/chainer/utils/array.py
|
import warnings
import numpy
import six
import chainer
from chainer.backends import cuda
def as_vec(x):
warnings.warn(
'chainer.utils.array.as_vec is deprecated. Please refer to '
'numpy.ravel or other array backend functions to flatten ndarrays.',
DeprecationWarning)
if x.ndim == 1:
return x
return x.ravel()
def as_mat(x):
warnings.warn(
'chainer.utils.array.as_mat is deprecated. Please refer to '
'numpy.reshape or other array backend functions to reshape ndarrays.',
DeprecationWarning)
if x.ndim == 2:
return x
return x.reshape(len(x), -1)
def empty_like(x):
warnings.warn(
'chainer.utils.array.empty_like is deprecated. Please refer to '
'numpy.empty_like or other array backend functions to initialize '
'empty arrays.',
DeprecationWarning)
if cuda.available and isinstance(x, cuda.ndarray):
return cuda.cupy.empty_like(x)
else:
return numpy.empty_like(x)
def size_of_shape(shape):
size = 1
for i in shape:
size *= i
# should not return long in Python 2
return int(size)
def sum_to(x, shape):
if x.shape == shape:
return x
if isinstance(x, chainer.Variable):
raise TypeError(
'chainer.utils.sum_to does not support Variable input. '
'Use chainer.functions.sum_to instead.')
ndim = len(shape)
lead = x.ndim - ndim
lead_axis = tuple(six.moves.range(lead))
axis = tuple([i + lead for i, sx in enumerate(shape) if sx == 1])
y = x.sum(lead_axis + axis, keepdims=True)
if lead > 0:
y = y.squeeze(lead_axis)
return y
| 1,688
| 24.590909
| 78
|
py
|
chainer
|
chainer-master/chainer/utils/sparse.py
|
import numpy
import chainer
from chainer import backend
from chainer import cuda
def _add_at(add_at, x, row, col, data):
assert data.size > 0
last_nz = data.size - (data != 0)[::-1].argmax()
add_at(x, (row[:last_nz], col[:last_nz]), data[:last_nz])
class CooMatrix(object):
"""A sparse matrix in COO format.
Args:
data (:ref:`ndarray`): The entries of the matrix.
The entries are usually non-zero-elements in the matrix.
row (:ref:`ndarray`): The row indices of the matrix
entries.
col (:ref:`ndarray`): The column indices of the matrix
entries.
shape (tuple of int): The shape of the matrix in dense format.
order ('C', 'F', 'other' or None): If ``'C'``, the maxtix is assumed
that its row indices are sorted. If ``'F'``, the matrix is assumed
that its column indices are sorted. If ``'other'``, the matrix is
assumed as neither 'C' order nor 'F' order. If ``None`` (this is
the default), the matrix is automatically checked if it is 'C'
order, 'F' order or another. This information will be used by some
functions like :func:`~chainer.functions.sparse_matmul` as a hint
to improve performance.
requires_grad (bool): If ``True``, gradient of this sparse matrix will
be computed in back-propagation.
.. seealso::
See :func:`~chainer.utils.to_coo` for how to construct a COO matrix
from an array.
"""
def __init__(self, data, row, col, shape, order=None,
requires_grad=False):
if not (1 <= data.ndim <= 2):
raise ValueError('ndim of data must be 1 or 2.')
if not (data.ndim == row.ndim == col.ndim):
raise ValueError('ndim of data, row and col must be the same.')
if len(shape) != 2:
raise ValueError('length of shape must be 2.')
if not (shape[0] > 0 and shape[1] > 0):
raise ValueError('numbers in shape must be greater than 0.')
if order not in ('C', 'F', 'other', None):
raise ValueError('order must be \'C\', \'F\', \'other\' or None.')
self.data = chainer.Variable(data, requires_grad=requires_grad)
self.row = row
self.col = col
self.shape = shape # (row, col)
self.order = order
if order is None:
self.order = get_order(row, col)
def to_dense(self):
"""Returns a dense matrix format of this sparse matrix."""
data = self.data
if data.ndim == 1:
shape = self.shape
elif data.ndim == 2:
shape = (data.shape[0], *self.shape)
else:
assert False
xp = data.xp
x = xp.zeros(shape, dtype=data.dtype)
if data.size > 0:
row = self.row
col = self.col
if xp is numpy:
add_at = numpy.add.at
elif xp is cuda.cupy:
add_at = cuda.cupyx.scatter_add
data = data.array
if data.ndim == 1:
_add_at(add_at, x, row, col, data)
elif data.ndim == 2:
for i in range(data.shape[0]):
_add_at(add_at, x[i], row[i], col[i], data[i])
else:
assert False
return x
def to_coo(x, ldnz=None, requires_grad=False):
"""Returns a single or a batch of matrices in COO format.
Args:
x (:ref:`ndarray`): Input dense matrix. The ndim of
``x`` must be two or three. If ndim is two, it is treated as
a single matrix. If three, it is treated as batched matrices.
ldnz (int): Size of arrays for data, row index and column index to be
created. The Actual size becomes max(nnz, ldnz) where nnz is number
of non-zero elements in a input dense matrix.
requires_grad (bool): If ``True``, gradient of sparse matrix will be
computed in back-propagation.
Returns:
~chainer.utils.CooMatrix: A sparse matrix or batched sparse matrices
in COO format of a given dense matrix or batched dense matrices.
.. admonition:: Example
Create a :class:`~chainer.utils.CooMatrix` from an array with 2
non-zero elements and 4 zeros and access its attributes. No batch
dimension is involved.
.. doctest::
>>> data = np.array([[0, 2, 0], [-1, 0, 0]], np.float32)
>>> x = chainer.utils.to_coo(data)
>>> x.data
variable([ 2., -1.])
>>> x.row
array([0, 1], dtype=int32)
>>> x.col
array([1, 0], dtype=int32)
>>> x.shape
(2, 3)
"""
xp = backend.get_array_module(x)
if x.ndim == 2:
_row, _col = xp.where(x != 0)
nnz = len(_row)
if ldnz is None or ldnz < nnz:
ldnz = nnz
data = xp.zeros((ldnz), dtype=x.dtype)
row = xp.full((ldnz), -1, dtype=xp.int32)
col = xp.full((ldnz), -1, dtype=xp.int32)
data[:nnz] = x[_row, _col]
row[:nnz] = xp.array(_row).astype(xp.int32)
col[:nnz] = xp.array(_col).astype(xp.int32)
shape = x.shape
return CooMatrix(data, row, col, shape,
requires_grad=requires_grad)
elif x.ndim == 3:
# first axis is batch axis
nb = x.shape[0]
if ldnz is None:
ldnz = 0
for i in range(nb):
ldnz = max(ldnz, len(xp.where(x[i] != 0)[0]))
data = xp.empty((nb, ldnz), dtype=x.dtype)
row = xp.empty((nb, ldnz), dtype=xp.int32)
col = xp.empty((nb, ldnz), dtype=xp.int32)
for i in range(nb):
coo = to_coo(x[i], ldnz)
data[i] = coo.data.data
row[i] = coo.row
col[i] = coo.col
shape = x.shape[1:]
return CooMatrix(data, row, col, shape,
requires_grad=requires_grad)
else:
raise ValueError('ndim of x must be 2 or 3.')
def get_order(row, col):
"""Check if a coo matrix with given row and col is C or F order.
Args:
row (:ref:`ndarray`): The row indices of the matrix
entries.
col (:ref:`ndarray`): The column indices of the matrix
entries.
Returns:
Returns ``'C'`` when a coo matrix with given row and column indices is
C order, in other words, the row indices are sorted. Returns ``'F'``
when it is F order, in other words, the column indices are sorted.
Returns ``'other'`` otherwise.
"""
if _is_c_order(row, col):
return 'C'
if _is_c_order(col, row):
return 'F'
return 'other'
def _is_c_order(row, col):
"""Check if a coo matrix with given row and col is c_order"""
if row.shape != col.shape:
raise ValueError('shape of row and col must be the same.')
if row.ndim != 1:
for i in range(row.shape[0]):
if not _is_c_order(row[i], col[i]):
return False
return True
xp = backend.get_array_module(row)
_row = row[col >= 0]
_col = col[row >= 0]
if _row[_row < 0].size > 0 or _col[_col < 0].size:
raise ValueError('invalid index combination of row and col.')
if _row.shape[0] <= 1:
return True
row_diff = xp.zeros(_row.shape, dtype=_row.dtype)
row_diff[1:] = _row[1:] - _row[:-1]
if xp.amin(row_diff) < 0:
return False
col_diff = xp.zeros(_col.shape, dtype=_col.dtype)
col_diff[1:] = _col[1:] - _col[:-1]
col_diff[(row_diff > 0)] = 0
return xp.amin(col_diff) >= 0
| 7,680
| 34.725581
| 79
|
py
|
chainer
|
chainer-master/chainer/utils/argument.py
|
import inspect
def check_unexpected_kwargs(kwargs, **unexpected):
for key, message in unexpected.items():
if key in kwargs:
raise ValueError(message)
def parse_kwargs(kwargs, *name_and_values, **unexpected):
values = [kwargs.pop(name, default_value)
for name, default_value in name_and_values]
if kwargs:
check_unexpected_kwargs(kwargs, **unexpected)
caller = inspect.stack()[1]
args = ', '.join(repr(arg) for arg in sorted(kwargs.keys()))
message = caller[3] + \
'() got unexpected keyword argument(s) {}'.format(args)
raise TypeError(message)
return tuple(values)
def assert_kwargs_empty(kwargs):
# It only checks if kwargs is empty.
parse_kwargs(kwargs)
| 773
| 28.769231
| 68
|
py
|
chainer
|
chainer-master/chainer/utils/precision.py
|
import functools
import numpy
def _fp16_mixed_precision_helper(fn):
"""Decorator to perform computation in FP32 for FP16 inputs/outputs
Decorator to perform forward computation in FP32 for FP16 inputs,
returning outputs casted back to FP16. Do nothing for FP32 and FP64
inputs.
"""
@functools.wraps(fn)
def wrapper(self, in_data):
flag = all([x.dtype.kind != 'f' or x.dtype == numpy.float16
for x in in_data])
in_data1 = []
for x in in_data:
if x.dtype == numpy.float16:
in_data1.append(x.astype(numpy.float32))
else:
in_data1.append(x)
in_data1 = tuple(in_data1)
out_data = fn(self, in_data1)
if flag:
out_data1 = []
for y in out_data:
if y is not None and y.dtype == numpy.float32:
out_data1.append(y.astype(numpy.float16))
else:
out_data1.append(y)
out_data = tuple(out_data1)
return out_data
return wrapper
| 1,091
| 27
| 71
|
py
|
chainer
|
chainer-master/chainer/utils/walker_alias.py
|
import numpy
import chainer
from chainer.backends import cuda
from chainer import device_resident
class WalkerAlias(device_resident.DeviceResident):
"""Implementation of Walker's alias method.
This method generates a random sample from given probabilities
:math:`p_1, \\dots, p_n` in :math:`O(1)` time.
It is more efficient than :func:`~numpy.random.choice`.
This class works on both CPU and GPU.
Args:
probs (float list): Probabilities of entries. They are normalized with
`sum(probs)`.
See: `Wikipedia article <https://en.wikipedia.org/wiki/Alias_method>`_
"""
def __init__(self, probs):
super(WalkerAlias, self).__init__()
prob = numpy.array(probs, numpy.float32)
prob /= numpy.sum(prob)
threshold = numpy.ndarray(len(probs), numpy.float32)
values = numpy.ndarray(len(probs) * 2, numpy.int32)
il, ir = 0, 0
pairs = list(zip(prob, range(len(probs))))
pairs.sort()
for prob, i in pairs:
p = prob * len(probs)
while p > 1 and ir < il:
values[ir * 2 + 1] = i
p -= 1.0 - threshold[ir]
ir += 1
threshold[il] = p
values[il * 2] = i
il += 1
# fill the rest
for i in range(ir, len(probs)):
values[i * 2 + 1] = 0
assert((values < len(threshold)).all())
self.threshold = threshold
self.values = values
@property
def use_gpu(self):
# TODO(niboshi): Maybe better to deprecate the property.
device = self.device
xp = device.xp
if xp is cuda.cupy:
return True
elif xp is numpy:
return False
raise RuntimeError(
'WalkerAlias.use_gpu attribute is only applicable for numpy or '
'cupy devices. Use WalkerAlias.device attribute for general '
'devices.')
def device_resident_accept(self, visitor):
super(WalkerAlias, self).device_resident_accept(visitor)
self.threshold = visitor.visit_array(self.threshold)
self.values = visitor.visit_array(self.values)
def sample(self, shape):
"""Generates a random sample based on given probabilities.
Args:
shape (tuple of int): Shape of a return value.
Returns:
Returns a generated array with the given shape. If a sampler is in
CPU mode the return value is a :class:`numpy.ndarray` object, and
if it is in GPU mode the return value is a :class:`cupy.ndarray`
object.
"""
device = self.device
xp = device.xp
with chainer.using_device(device):
if xp is cuda.cupy:
return self.sample_gpu(shape)
else:
return self.sample_xp(xp, shape)
def sample_xp(self, xp, shape):
thr_dtype = self.threshold.dtype
pb = xp.random.uniform(0, len(self.threshold), shape)
index = pb.astype(numpy.int32)
left_right = (
self.threshold[index]
< (pb.astype(thr_dtype) - index.astype(thr_dtype)))
left_right = left_right.astype(numpy.int32)
return self.values[index * 2 + left_right]
def sample_gpu(self, shape):
ps = cuda.cupy.random.uniform(size=shape, dtype=numpy.float32)
vs = cuda.elementwise(
'T ps, raw T threshold , raw S values, int32 b',
'int32 vs',
'''
T pb = ps * b;
int index = __float2int_rd(pb);
// fill_uniform sometimes returns 1.0, so we need to check index
if (index >= b) {
index = 0;
}
int lr = threshold[index] < pb - index;
vs = values[index * 2 + lr];
''',
'walker_alias_sample'
)(ps, self.threshold, self.values, len(self.threshold))
return vs
| 3,976
| 32.70339
| 78
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.