| repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
chainer
|
chainer-master/chainermn/datasets/empty_dataset.py
|
def create_empty_dataset(dataset):
"""Creates an empty dataset for models with no inputs and outputs.
This function generates an empty dataset, i.e., ``__getitem__()`` only
returns ``None``. Its dataset is compatible with the original one.
Such datasets used for models which do not take any inputs,
neither return any outputs. We expect models, e.g., whose ``forward()``
is starting with ``chainermn.functions.recv()`` and ending with
``chainermn.functions.send()``.
Args:
dataset: Dataset to convert.
Returns:
~chainer.datasets.TransformDataset:
Dataset consists of only patterns in the original one.
"""
return [()] * len(dataset)
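# Illustrative usage sketch (added; not part of the original module). The
# returned dataset can be fed to an ordinary iterator on processes whose
# model takes no input; ``train_dataset`` and ``batch_size`` below are
# assumed to exist::
#
#     empty = create_empty_dataset(train_dataset)
#     iterator = chainer.iterators.SerialIterator(empty, batch_size)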
| 709
| 36.368421
| 75
|
py
|
chainer
|
chainer-master/chainermn/datasets/scatter.py
|
import warnings
import chainer.datasets
import numpy
class DataSizeError(RuntimeError):
pass
def scatter_dataset(dataset, comm, root=0, shuffle=False,
seed=None, max_buf_len=256 * 1024 * 1024,
*, force_equal_length=True):
"""Scatter the given dataset to the workers in the communicator.
The dataset of worker ``root``
(i.e., the worker whose ``comm.rank`` is ``root``) is
scattered to all workers. The given dataset of other workers are ignored.
The dataset is split to sub datasets of almost equal sizes and scattered
to workers. To create a sub dataset, ``chainer.datasets.SubDataset`` is
used.
Note::
Make sure ``force_equal_length`` flag is *not* off for
multinode evaluator or multinode updaters, which assume that
the iterator has the same lengths among processes to work
correctly.
Args:
dataset: A dataset (e.g., ``list``, ``numpy.ndarray``,
``chainer.datasets.TupleDataset``, ...).
comm: ChainerMN communicator or MPI4py communicator.
shuffle (bool): If ``True``, the order of examples is shuffled
before being scattered.
root (int): The root process of the scatter operation.
seed (int): Seed the generator used for the permutation of indexes.
If an integer being convertible to 32 bit unsigned integers is
specified, it is guaranteed that each sample
in the given dataset always belongs to a specific subset.
If ``None``, the permutation is changed randomly.
max_buf_len (int): Max buffer size to be used at broadcasting
binaries. Must not be larger than 2147483647.
force_equal_length (bool):
Force the scattered fragments of the dataset have equal
length. If ``True``, number of scattered examples is
guaranteed to be equal among processes and scattered
datasets may have duplication among processes. Otherwise,
number of scattered examples may not be equal among
processes, but scattered examples are guaranteed to have
no duplication among processes, intended for strict
evaluation of test dataset to avoid duplicated examples.
Returns:
Scattered dataset.
"""
assert 0 <= root and root < comm.size
order = None
if shuffle and dataset is not None:
n_total_samples = len(dataset)
order = numpy.random.RandomState(seed).permutation(
n_total_samples)
data = (dataset, order) if comm.rank == root else None
data = comm.bcast_obj(data, max_buf_len=max_buf_len, root=root)
assert data is not None
(dataset, order) = data
(b, e) = scatter_index(
len(dataset), comm, root,
force_equal_length=force_equal_length)
return chainer.datasets.SubDataset(dataset, b, e, order)
def scatter_index(n_total_samples, comm, root=0, *, force_equal_length=True):
    '''Scatters only indices to avoid broadcasting the whole dataset
    This is the core functionality of ``scatter_dataset``, which is
    almost equivalent to the following code snippet::
        (b, e) = scatter_index(len(dataset), comm)
        order = None
        if shuffle:
            order = numpy.random.RandomState(seed).permutation(
                n_total_samples)
            order = comm.bcast_obj(order)
        dataset = SubDataset(dataset, b, e, order)
    Note::
        Make sure the ``force_equal_length`` flag is *not* turned off for
        the multi-node evaluator or multi-node updaters, which assume that
        the iterators have the same length on every process to work
        correctly.
    Args:
        n_total_samples (int): number of total samples to scatter
        comm: ChainerMN communicator object
        root (int): root rank to coordinate the operation
        force_equal_length (bool):
            Force the scattered fragments of the index to have equal
            length. If ``True``, the number of scattered indices is
            guaranteed to be equal among processes, and the scattered
            datasets may have duplicated indices among processes.
            Otherwise, the number of scattered indices may differ among
            processes, but the scattered indices are guaranteed to have
            no duplication among processes, which is intended for strict
            evaluation of a test dataset without duplicated examples.
    Returns:
        Tuple of two integers that stand for the beginning and ending
        offsets of the assigned sub part of samples. The ending offset
        is exclusive.
    '''
if comm.rank == root:
for (i, b, e) in _scatter_index(n_total_samples, comm.size,
force_equal_length):
if i == root:
mine = (b, e)
else:
comm.send_obj((b, e), dest=i)
return mine
else:
return comm.recv_obj(source=root)
def _scatter_index(n_total_samples, size, force_equal_length):
assert size > 0
assert n_total_samples >= 0
if force_equal_length:
n_sub_samples = (n_total_samples + size - 1) // size
for i in range(size):
b = n_total_samples * i // size
e = b + n_sub_samples
yield (i, b, e)
return
else:
b = 0
stride = (n_total_samples // size) + 1
threshold = n_total_samples % size
for i in range(threshold):
e = b + stride
yield (i, b, e)
b += stride
stride = n_total_samples // size
for i in range(threshold, size):
e = b + stride
yield (i, b, e)
b += stride
return
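# Worked example (added for illustration; the values follow from the logic
# above, they are not taken from the original source): with
# n_total_samples=10 and size=3,
#   force_equal_length=True  yields (0, 0, 4), (1, 3, 7), (2, 6, 10)
#     -- every worker gets 4 samples, and samples 3 and 6 are duplicated.
#   force_equal_length=False yields (0, 0, 4), (1, 4, 7), (2, 7, 10)
#     -- workers get 4, 3 and 3 samples with no duplication.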
def get_n_iterations_for_one_epoch(dataset, local_batch_size, comm):
"""Get the number of iterations for one epoch.
.. note::
This API is deprecated. Please use standard epoch triggers.
Args:
dataset: Sub dataset of each worker.
local_batch_size (int): Batch size of each worker.
comm: ChainerMN communicator or MPI4py communicator.
Returns:
int: the number of iterations for one epoch.
"""
warnings.warn(
'get_n_iterations_for_one_epoch is deprecated. Please use '
'standard epoch triggers.', DeprecationWarning)
n_iterations = None
if comm.rank == 0:
n_iterations = (len(dataset) + local_batch_size -
1) // local_batch_size
return comm.bcast_obj(n_iterations)
def get_epoch_trigger(n_epochs, dataset, local_batch_size, comm):
"""Get the trigger that behaves like an epoch trigger.
.. note::
This API is deprecated. Please use standard epoch triggers.
Args:
n_epochs (int): The number of epochs.
dataset: Sub dataset of each worker.
local_batch_size (int): Batch size of each worker.
comm: ChainerMN communicator or MPI4py communicator.
Returns:
The trigger that behaves like the epoch trigger.
"""
warnings.warn(
'get_epoch_trigger is deprecated. Please use standard epoch triggers.',
DeprecationWarning)
n_iterations = n_epochs * get_n_iterations_for_one_epoch(
dataset, local_batch_size, comm)
return n_iterations, 'iteration'
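# Illustrative usage sketch (added; not part of the original module).
# ``comm`` is assumed to be a ChainerMN communicator and
# ``load_full_training_set()`` is a hypothetical loader::
#
#     train = None
#     if comm.rank == 0:
#         train = load_full_training_set()
#     train = scatter_dataset(train, comm, shuffle=True, seed=0)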
| 7,316
| 34.347826
| 79
|
py
|
chainer
|
chainer-master/chainermn/datasets/__init__.py
|
from chainermn.datasets.empty_dataset import create_empty_dataset # NOQA
from chainermn.datasets.scatter import DataSizeError # NOQA
from chainermn.datasets.scatter import scatter_index # NOQA
from chainermn.datasets.scatter import scatter_dataset # NOQA
| 259
| 51
| 73
|
py
|
chainer
|
chainer-master/chainermn/extensions/checkpoint.py
|
import errno
import os
import shutil
import tempfile
import time
import warnings
import chainer
from chainer.training import extension
def create_multi_node_checkpointer(name, comm, cp_interval=5,
gc_interval=5, path=None):
    '''Create multi-node checkpointer object
    Generational snapshot extension to allow fault tolerance.
    It keeps several old snapshots so that each MPI process can roll
    back to a synchronized snapshot. Snapshot files are identified
    as '<name>.<rank>.<iteration>'.
    - <name> ... identifier of the run the snapshot is kept for
    - <rank> ... rank of the process that owned the model
    - <iteration> ... number of the iteration.
    This extension keeps several files for each execution and allows
    users to resume the whole job at the latest snapshots of each MPI
    process, i.e. the iteration where all snapshots agree.
    As this object is a usual Chainer extension, users can just
    create this object and pass it to the trainer as an extension::
        checkpointer = create_multi_node_checkpointer(name=run_id, comm=comm)
        trainer.extend(checkpointer, trigger=(25, 'iteration'))
    To run recovery at startup, before the first iteration, run
        checkpointer.maybe_load(trainer, optimizer)
    before ``trainer.run()`` . If nothing is recovered (i.e. no
    snapshot is found), ``trainer.updater.iteration`` will remain ``0`` .
    Otherwise it will have the value of the snapshot and training
    will resume from that iteration. ``optimizer`` is optional, but
    passing it lets the multi-node optimizer avoid the initial broadcast
    when the snapshot data of all nodes are in sync.
    .. note:: Make sure that ``checkpointer.maybe_load`` is called
        *after* all extensions with states, such as ``ExponentialShift``,
        are set to the trainer.
    .. note:: The checkpointer is deprecated. Please use
        :func:`chainermn.extensions.multi_node_snapshot` instead.
    After training finishes without errors, all those temporary
    checkpoints are cleaned up at all nodes.
    Another example to use the checkpointer *without* a trainer would be::
        checkpointer = create_multi_node_checkpointer(name=run_id, comm=comm)
        checkpointer.maybe_load(obj_you_want_to_snap, optimizer)
        while True:  ## Training loop
            ...
            updater.update()
            ...
            checkpointer.save(obj_you_want_to_snap)  # Make a checkpoint
    Args:
        name (str): unique id of the run
        comm: communicator in ChainerMN
        cp_interval (int): minimum number of checkpoints to preserve
        gc_interval (int): interval to collect non-preserved checkpoints
        path (str): optional directory to save snapshot files in; defaults
            to the output directory of the trainer
    '''
    warnings.warn('Checkpointer is deprecated. '
                  'Use chainermn.extensions.multi_node_snapshot instead.',
                  DeprecationWarning)
return _MultiNodeCheckpointer(name, comm, cp_interval, gc_interval, path)
class _CheckpointStats(object):
def __init__(self):
self.timings = []
self.begin = None
def start(self):
self.begin = time.time()
def end(self):
e = time.time()
if self.begin is None:
return
self.timings.append({'b': self.begin, 'd': e - self.begin})
self.begin = None
def report(self):
count = len(self.timings)
if count == 0:
return 'No stats available'
durations = [t['d'] for t in self.timings]
average = sum(durations) / count
fmt_str = 'Snapshot duration stats (sec): avg={:f}, min={:f}, max={:f}'
return fmt_str.format(average, min(durations), max(durations))
class _MultiNodeCheckpointer(extension.Extension):
def __init__(self, name, comm, cp_interval, gc_interval, path):
self.name = name
self.cp_interval = cp_interval
self.gc_interval = gc_interval
self.comm = comm
self.files = []
self.stats = _CheckpointStats()
# TODO(kuenishi): support path expression such as
# 'path/{rank}/snapshot' or 'path/{host}/snapshot'
if path is not None:
self.path = path
_maybe_makedirs(self.path)
else:
self.path = None
assert name is not None
assert self.cp_interval > 0
assert self.gc_interval > 0
assert self.comm is not None
def __call__(self, trainer):
# This is supposed to be called at the exact same interval
# among all nodes
if self.path is None:
# Note: In a non-trainer use case this path will fail; You
# shouldn't pass None at __init__().
self.path = trainer.out
self.save(trainer, trainer.updater.iteration)
def save(self, target, iteration):
        '''Take snapshots of a target (mostly a trainer) at each node
        This must be called synchronously at all nodes, at the same
        iteration.
        '''
        # TODO(kuenishi): Taking a checksum of the snapshot file may make
        # model loading more reliable. snapshot_object is smart in that it
        # uses a temporary file and then moves it, which prevents partial
        # writes as an atomic operation. If we assume external causes such
        # as bit rot or file truncation, we need the checksum. In the
        # current implementation, manual removal of the latest snapshot
        # files will let recovery happen against the next-latest snapshot.
filename = self._filename(iteration)
self.stats.start()
_save(self.path, filename, target)
self.stats.end()
self.files.append(filename)
        if len(self.files) - self.cp_interval > self.gc_interval:
# remove older snapshots, and broadcast latest list
self._sync_file_list(remove_remainder=True)
def finalize(self):
'''Finalize checkpointer
Clean up all intermediate snapshots.
'''
assert self.path is not None
files2remove = self.files
for file in files2remove:
filename = os.path.join(self.path, file)
try:
os.remove(filename)
except Exception:
pass
self.files = []
def get_stats(self):
        '''Get statistics of taking snapshots
        After or during training, the checkpointer holds statistics on
        saving checkpoints, such as the average, minimum and maximum
        time. With these stats users may identify slow nodes or disks,
        or know the average time penalty of taking a snapshot and
        optimize the interval between snapshots.
        '''
return self.stats.report()
def _sync_file_list(self, remove_remainder=False):
file_lists = self.comm.gather_obj(self.files)
iters0 = None
if self.comm.rank == 0:
if file_lists is not None:
if len(file_lists) == 0:
self.files = []
return
iters0 = set(
[i for _, _, i in self._parse_filenames(file_lists[0])])
for files in file_lists[1:]:
iters = set(
[i for _, _, i in self._parse_filenames(files)])
iters0 &= iters
iters0 = list(iters0)
iters0.sort()
iters0 = iters0[-self.cp_interval:]
else:
raise RuntimeError('Can\'t gather checkpoint file names')
iters0 = self.comm.bcast_obj(iters0)
files = self._filenames(iters0)
if remove_remainder:
files2remove = set(self.files) - set(files)
for file in files2remove:
try:
os.remove(os.path.join(self.path, file))
except Exception:
pass
self.files = files
def _filenames(self, iterations):
return [self._filename(i) for i in iterations]
def _filename(self, iteration):
# TODO(kuenishi): As a node identifier, should we use node
# name (e.g. hostname) or MPI rank?
#
# hostname is fine when MPI rank changes among same set of nodes.
# MPI rank is fine when node fails and a new node has come.
filename = '{:s}.{:d}.{:d}'.format(
self.name, self.comm.rank, iteration)
return filename
def _parse_filenames(self, filenames):
        # parse filenames and return a list of (name, rank, iteration)
        # tuples (or None for filenames that do not match)
return [self._parse_filename(f) for f in filenames]
def _parse_filename(self, filename):
tpl = filename.split('.')
if len(tpl) != 3:
return
name, rank, iter = tpl
if name != self.name:
return
return name, int(rank), int(iter)
def maybe_load(self, trainer, optimizer=None, path=None):
        '''If an existing snapshot is found, load it, sync, and resume.
'''
if self.path is None:
if path is not None:
self.path = path
else:
self.path = trainer.out
local_files = []
try:
local_files = os.listdir(self.path)
except Exception:
# Maybe I am the only process that does not have result
# directory
pass
local_iters = filter(None, self._parse_filenames(local_files))
local_iters = [i for name, rank, i in local_iters if name ==
self.name and rank == self.comm.rank]
self.files = self._filenames(local_iters)
# Collect common file list
self._sync_file_list()
# Get set of common snapshot numbers (=iteration number)
iters = [i for name, rank, i in self._parse_filenames(self.files)]
if iters:
# Adopt latest snapshot from iteration number
i = max(iters)
            # Note that the checkpointer only verifies the file name; if an
            # exception happens here, manually deleting the *latest*
            # snapshot lets the checkpointer work sanely against the
            # next-latest snapshot.
_load(self.path, self._filename(i), trainer)
if optimizer is not None:
# If this is a complete resume, no broadcast is needed ^^;
# 'complete resume' means all workers' snapshot is preserved,
# so all workers can assume their loaded model is complete.
# Otherwise _MultiNodeOptimizer broadcasts and shares data
# from rank 0.
optimizer.__setattr__('needs_broadcast', False)
def _load(path, filename, target):
chainer.serializers.load_npz(os.path.join(path, filename), target)
def _save(path, filename, target):
    # A plain save_npz may cause a partial write; this is copied and
    # slightly modified from chainer.extensions.snapshot instead.
_maybe_makedirs(path)
prefix = 'tmp-' + filename
fd, tmppath = tempfile.mkstemp(prefix=prefix, dir=path)
try:
chainer.serializers.save_npz(tmppath, target)
except Exception:
os.close(fd)
os.remove(tmppath)
raise
os.close(fd)
shutil.move(tmppath, os.path.join(path, filename))
def _maybe_makedirs(path):
    # This is for Python 2-3 compatibility;
    # os.makedirs(path, exist_ok=True) would be simpler
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
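# Illustrative note (added; not part of the original module). Snapshot file
# names follow the '<name>.<rank>.<iteration>' convention described above:
# a run named 'run0' on rank 2 at iteration 1000 produces 'run0.2.1000', and
# _MultiNodeCheckpointer._parse_filename('run0.2.1000') recovers
# ('run0', 2, 1000) when self.name == 'run0'.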
| 11,492
| 33.002959
| 79
|
py
|
chainer
|
chainer-master/chainermn/extensions/multi_node_evaluator.py
|
import copy
import six
from chainer.training import extension
from chainer import backend
from chainer.dataset import convert
from chainer import function
from chainer.utils import argument
import chainerx as chx
class GenericMultiNodeEvaluator(extension.Extension):
    '''Generic multi-node evaluator for non-allreducible evaluation.
    This is to evaluate a dataset that cannot be evenly divided across
    all processes in the communicator, for evaluation calculations that
    are not applicable to a simple add-and-divide style averaging among
    processes.
    Users are recommended to implement their own local calculation
    ``calc_local()`` (e.g. at each distributed GPU) and aggregation
    ``aggregate()`` of its results, although it has built-in
    implementations of those two methods.
    It has several drawbacks: 1) users need to implement the aggregation
    themselves, and 2) it has no compatibility with
    :class:`~chainer.training.extensions.Evaluator`.
    .. note:: No automatic support of Reporter is provided; set it up
        in the ``initialize()`` method
    Args:
        comm:
            ChainerMN communicator object
        iterator:
            An iterator for the test dataset. Must be non-repeated.
        target (callable):
            A model to evaluate with the test dataset
        device (int or chainer.backend.Device):
            A device indicator to send data with the converter. Not used
            when the converter is not using any devices.
        converter (callable):
            A converter. Default value is
            :func:`chainer.dataset.concat_examples` .
        root (int):
            Rank number of the root process to run bcast and gather with.
        progress_hook (callable):
            A callable that receives a single argument for indicators. The
            callable is only called at the root process.
    '''
trigger = 1, 'epoch'
default_name = 'validation'
priority = extension.PRIORITY_WRITER
name = None
def __init__(self, comm, iterator, target, device=None,
converter=convert.concat_examples, root=0,
**kwargs):
progress_hook, = argument.parse_kwargs(kwargs, ('progress_hook', None))
self.comm = comm
self.iterator = iterator
self._targets = {"main": target}
self.converter = converter
if device is not None:
device = backend.get_device(device)
self.device = device
self._progress_hook = progress_hook
assert 0 <= root and root < self.comm.size
self.root = root
def __call__(self, trainer):
if hasattr(self.iterator, 'reset'):
self.iterator.reset()
it = self.iterator
else:
it = copy.copy(self.iterator)
if self.comm is not None:
gen = self._evaluate_local(it)
if self.comm.rank == self.root:
total_result = self.aggregate([result for result in gen])
else:
for _ in gen:
pass
total_result = None
else:
# Non-multinode environment
            gen = self._evaluate_local_single(it)
total_result = self.aggregate([result for result in gen])
return total_result
def calc_local(self, *args, **kwargs):
        '''A generic method for local calculation.
        Override this method to run your own local calculation. Otherwise,
        results are calculated with the original target and test dataset.
        Args:
            args:
                Result of the converter when it is a tuple.
            kwargs:
                Result of the converter when it is a dict.
        Returns:
            An arbitrary value may be returned, but it must not be ``None``.
        '''
target = self._targets['main']
return target(*args, **kwargs)
def aggregate(self, results):
        '''A generic aggregation method.
        Override this method for your own aggregation calculation. By
        default, it does nothing but return the input. This method is
        called once and only once across the cluster, at the root
        process. Reporting can be run here.
        Args:
            results (list):
                List of return values of ``calc_local()`` obtained from
                all nodes.
        '''
return results
def _evaluate_local_single(self, iterator):
for batch in iterator:
in_arrays = convert._call_converter(
self.converter, batch, self.device)
with function.no_backprop_mode():
if isinstance(in_arrays, tuple):
results = self.calc_local(*in_arrays)
elif isinstance(in_arrays, dict):
results = self.calc_local(**in_arrays)
else:
results = self.calc_local(in_arrays)
if self._progress_hook:
self._progress_hook(batch)
yield results
def _evaluate_local(self, iterator):
# Check whether local eval is all done every 8 rounds
gather_interval = 8
all_done = None
while not all_done:
all_done = None
results = None
for _ in range(gather_interval):
try:
batch = iterator.next()
in_arrays = convert._call_converter(
self.converter, batch, self.device)
with function.no_backprop_mode():
if isinstance(in_arrays, tuple):
results = self.calc_local(*in_arrays)
elif isinstance(in_arrays, dict):
results = self.calc_local(**in_arrays)
else:
results = self.calc_local(in_arrays)
if self.comm.rank == self.root and self._progress_hook:
self._progress_hook(batch)
except StopIteration:
batch = None
results = None
results = self.comm.gather_obj(results, root=self.root)
if self.comm.rank == self.root:
valid_results = [r for r in results if r is not None]
for result in valid_results:
yield result
all_done = len(valid_results) == 0
all_done = self.comm.bcast_obj(all_done, root=self.root)
return
def create_multi_node_evaluator(actual_evaluator, communicator):
"""Create a multi node evaluator from a normal evaluator.
Actually this method patches the evaluator to work in multi node
environment. This method adds several hidden attributes starting
with `_mn_` prefix.
Args:
actual_evaluator: evaluator to be patched
(e.g., ``chainer.training.extensions.Evaluator``)
communicator: ChainerMN communicator
Returns:
The multi-node patched ``actual_evaluator``.
.. note:: After patched, original evaluator does not work
correctly in non-MPI environment.
"""
actual_evaluator._mn_original_evaluate = actual_evaluator.evaluate
actual_evaluator._mn_communicator = communicator
def new_evaluate(self):
local_mean_dict = self._mn_original_evaluate()
        # ChainerX support:
        # We need to convert ChainerX ndarrays to native arrays because
        # (1) allreduce_obj is used to compute global mean values, since
        #     a simple allreduce operation cannot be applied in evaluation,
        # (2) allreduce_obj calls mpi4py.allreduce, which pickles the object,
        # (3) chainerx.ndarray preserves the CUDA device internally when
        #     pickled, and
        # (4) an error will occur when such an ndarray is unpickled in
        #     another process.
arrays = list(local_mean_dict.values())
if len(arrays) > 0:
array0 = list(local_mean_dict.values())[0]
xp = backend.get_array_module(array0)
if xp == chx and array0.device.backend.name == 'cuda':
                # Results of evaluation are fairly small, so
                # the ndarray is transferred to the CPU and allreduce()-ed.
                # NOTE: Matrices for evaluation are transferred to the
                # host memory and sent via MPI instead of NCCL.
                # Although evaluation matrices are small in most cases,
                # this is a potential performance issue.
local_mean_dict = {
name: chx.to_numpy(value)
for name, value in local_mean_dict.items()
}
global_mean_dict = {
name:
self._mn_communicator.allreduce_obj(
value) / self._mn_communicator.size
for name, value in sorted(local_mean_dict.items())
}
return global_mean_dict
actual_evaluator.evaluate = six.create_bound_method(
new_evaluate, actual_evaluator)
return actual_evaluator
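# Illustrative usage sketch (added; ``MyEvaluator``, ``comm``,
# ``test_iterator``, ``model`` and ``trainer`` are hypothetical names, not
# part of ChainerMN). A subclass of GenericMultiNodeEvaluator typically
# overrides calc_local() and aggregate()::
#
#     class MyEvaluator(GenericMultiNodeEvaluator):
#         def calc_local(self, *args, **kwargs):
#             loss = self._targets['main'](*args, **kwargs)
#             return float(loss.array)
#
#         def aggregate(self, results):
#             # called only at the root process
#             return sum(results) / len(results)
#
#     evaluator = MyEvaluator(comm, test_iterator, model)
#     trainer.extend(evaluator)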
| 9,083
| 34.484375
| 79
|
py
|
chainer
|
chainer-master/chainermn/extensions/allreduce_persistent.py
|
import chainer
import chainer.training.extension
def _namedpersistents(model):
assert isinstance(model, chainer.Link)
for lname, link in model.namedlinks():
for pname in link._persistent:
yield lname + '/' + pname, link.__dict__[pname]
class AllreducePersistent(chainer.training.extension.Extension):
"""Chainer extension to averagize persistents over workers.
When called, this extension invokes all-reduce communication among
workers to compute averages of persistent variables in the model.
Persistent variables are updated to the averages. Currently, we ignore
integer persistent variables, and only float persistent variables are
handled.
This extension is mainly to improve the running mean and variance of
BatchNormalization by increasing the effective number of examples.
We do not need to call this frequently; call just before storing or
evaluating the model.
Args:
model (chainer.link.Link): Target link object.
comm (ChainerMN communicator): communicator to compute averages.
"""
trigger = 1, 'epoch'
# This extension should be called earlier than evaluators.
priority = chainer.training.extension.PRIORITY_WRITER + 1
def __init__(self, model, comm):
self.model = model
self.comm = comm
def __call__(self, trainer=None):
for _, param in sorted(_namedpersistents(self.model)):
if hasattr(param, 'dtype'):
self.comm._multi_node_mean(None, param)
else:
pass # Integer persistent variables are ignored
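# Illustrative usage sketch (added; ``model``, ``comm`` and ``trainer`` are
# assumed to already exist)::
#
#     allreduce_persistent = AllreducePersistent(model, comm)
#     trainer.extend(allreduce_persistent)  # runs just before evaluators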
| 1,616
| 32.6875
| 74
|
py
|
chainer
|
chainer-master/chainermn/extensions/multi_node_early_stopping_trigger.py
|
from chainer.training.triggers import EarlyStoppingTrigger
from chainermn.extensions import ObservationAggregator
class MultiNodeEarlyStoppingTrigger(object):
"""__init__(\
self, comm, check_trigger=(1, 'epoch'), monitor='main/loss', \
patience=3, mode='auto', verbose=False, \
max_trigger=(100, 'epoch'))
    Trigger for Early Stopping in Multi-Node Environments
    It serves almost the same purpose as
    :class:`~chainer.training.triggers.EarlyStoppingTrigger`,
    but it works correctly in multi-node environments.
    The difference from
    :class:`~chainer.training.triggers.EarlyStoppingTrigger` is that,
    in each check interval, it computes the mean of the accumulated
    values *across all nodes*. In this way, all nodes see the same
    value when deciding whether the trigger fires, so that
    they all stop at the same time.
Args:
comm : ChainerMN communicator
check_trigger: Trigger that decides the comparison
interval between current best value and new value.
This must be a tuple in the form of ``<int>,
'epoch'`` or ``<int>, 'iteration'`` which is passed to
:class:`~chainer.training.triggers.IntervalTrigger`.
monitor (str) : The metric you want to monitor
patience (int) : Counts to let the trigger be patient.
The trigger will not fire until the condition is met
for successive ``patience`` checks.
mode (str) : ``'max'``, ``'min'``, or ``'auto'``.
It is used to determine how to compare the monitored values.
        verbose (bool) : Enable verbose output.
            If ``True``, more information is printed.
max_trigger: Upper bound of the number of training loops
suffix (str): Suffix added to the name of the monitored
metric after aggregation.
    .. note::
        ``patients`` is also available as an alias of ``patience`` for
        historical reasons.
"""
def __init__(self, comm,
*, check_trigger=(1, 'epoch'), monitor='main/loss',
patience=None, mode='auto', verbose=False,
max_trigger=(100, 'epoch'), suffix='_aggregated', **kwargs):
# `patients` as an alias of `patience`
monitor_aggregated = monitor + suffix
self.actual_trigger = EarlyStoppingTrigger(check_trigger=check_trigger,
monitor=monitor_aggregated,
patience=patience,
mode=mode, verbose=verbose,
max_trigger=max_trigger,
**kwargs)
self.aggregator = ObservationAggregator(
comm, monitor,
aggregated_key=monitor_aggregated,
comm_trigger=check_trigger)
def __call__(self, trainer):
self.aggregator(trainer)
return self.actual_trigger(trainer)
def _stop_condition(self):
return self.actual_trigger._stop_condition()
def _init_summary(self):
return self.actual_trigger._init_summary()
def get_training_length(self):
return self.actual_trigger.get_training_length()
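# Illustrative usage sketch (added; ``comm`` and ``updater`` are assumed to
# already exist)::
#
#     stop_trigger = MultiNodeEarlyStoppingTrigger(
#         comm, monitor='validation/main/loss', patience=5,
#         check_trigger=(1, 'epoch'))
#     trainer = chainer.training.Trainer(updater, stop_trigger)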
| 3,334
| 41.21519
| 79
|
py
|
chainer
|
chainer-master/chainermn/extensions/_multi_node_snapshot.py
|
import io
from chainer.serializers import load_npz
from chainer.serializers import save_npz
from chainer.training.extension import Extension
from chainer.training.extensions._snapshot import _find_latest_snapshot
def multi_node_snapshot(comm, snapshot, replica_sets):
'''Create trainer extension for multi-node snapshots
    Provides a generic multi-node snapshot saving and auto-load feature
    in multi-node environments, leveraging the power of single-node
    snapshots.
    In many cases the snapshot target may differ between processes, e.g.
    only the trainer of the rank 0 process often has extensions such as
    ``LogReport``, so as not to clutter the terminal output. Just loading
    at one process and broadcasting it to other processes does not work
    in that case.
    This wrapper addresses that issue by defining sets of replicas,
    where within a set the target object is replicated and supposed
    to be the same among processes. For example, take a trainer where
    only the process at rank ``0`` has special extensions and the others
    don't::
trainer = Trainer(updater)
if comm.rank == 0:
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
    This case can be described with two replica sets, where each set
    can be represented as a single integer that indicates a rank number,
    or an iterable set/list/generator of integers, like this::
        replica_sets = [[0], range(1, comm.size)]
    Here the first replica set is described as ``[0]``, or in short
    just ``0``, and the second replica set is ``range(1, comm.size)``,
    representing the rest of the processes other than ``0``. The last
    replica set can be omitted, so in this case it can be simplified
    further::
replica_sets = [0,]
    In this case, the snapshot will be saved at the rank ``0`` process and
    at the rank ``1`` process, the latter being the master of the replica
    set ``range(1, comm.size)`` . In this case, autoloading at
    initialization of the snapshot extension works cleanly after a
    restart, even if the size of the communicator differs.
Once the replica sets are defined, it can be easily extended::
replica_sets = [0,]
snapshot = multi_node_snapshot(comm, extensions.snapshot(),
replica_sets)
trainer.extend(snapshot, trigger=(1, 'epoch'))
More example tuples of replica set representation follows:
===================== ===== ==============================================
code nproc actual sets
===================== ===== ==============================================
``[0]`` ``4`` ``[{0}, {1, 2, 3}]``
``[0, 1]`` ``4`` ``[{0}, {1}, {2, 3}]``
    ``[[0, 1], [2, 3]]`` ``4``   ``[{0, 1}, {2, 3}]``
``[]`` ``4`` ``[{0, 1, 2, 3}]``
``[range(0, 8, 2)]`` ``8`` ``[set(range(0, 8, 2)), set(range(1, 8, 2))]``
===================== ===== ==============================================
    Args:
        comm (ChainerMN communicator): communicator object
        snapshot: Snapshot extension object obtained via
            :meth:`~chainer.training.extensions.snapshot` .
        replica_sets: list of replica set definitions, where
            a replica set can be defined by a single integer
            (as a rank number) or an iterable of integers.
    Returns:
        Trainer extension that wraps ``snapshot`` and properly
        controls the number of snapshots.
'''
return _MultiNodeSnapshot(comm, snapshot, replica_sets)
def _parse_replica_sets(replica_sets, size):
sets = []
for replica_set in replica_sets:
if isinstance(replica_set, int):
assert replica_set >= 0
assert replica_set < size
sets.append({replica_set})
else:
# Must be iterable
for i in replica_set:
assert i >= 0
assert i < size
sets.append(set(replica_set))
if size > sum(len(s) for s in sets):
all_ranks = set(range(size))
all_exp = set()
for s in sets:
all_exp |= s
rest = all_ranks - all_exp
if rest:
sets.append(rest)
    # Must guarantee: every rank is covered by some set
assert size == sum(len(s) for s in sets)
    # Must guarantee: no two sets intersect.
all_sum = set()
for s in sets:
all_sum |= s
assert size == len(all_sum)
return sets
class _MultiNodeSnapshot(Extension):
def __init__(self, comm, snapshot, replica_sets):
assert comm is not None
assert snapshot is not None
self.comm = comm
self.snapshot = snapshot
# Append rank number to snapshot filename format/function
if callable(snapshot.filename):
filename_fun = snapshot.filename
def append_rank(trainer):
filename = filename_fun(trainer)
return '{}.{}'.format(filename, comm.rank)
snapshot.filename = append_rank
else:
filename = '{}.{}'.format(snapshot.filename, comm.rank)
snapshot.filename = filename
sets = _parse_replica_sets(replica_sets, comm.size)
self.master = None
self.replica_set = []
for s in sets:
if self.comm.rank in s:
self.master = min(s)
self.replica_set = s
break
assert self.master is not None
assert self.comm.rank in self.replica_set
@property
def is_master(self):
return self.master == self.comm.rank
def initialize(self, trainer):
if self.is_master:
self.snapshot.initialize(trainer)
# If autoload is off, no need to re-init this extension.
if not self.snapshot.autoload:
return
if self.snapshot._target is None:
target = trainer
else:
target = self.snapshot._target
# "Broadcast" the target here
if self.is_master:
# Find snapshot again
# TODO(kuenishi): replace with cleaner way to know whether
# a snapshot is autoloaded or not
filename = _find_latest_snapshot(self.snapshot.filename,
trainer.out)
if filename is None:
data = None
else:
buf = io.BytesIO()
save_npz(buf, target)
data = buf.getvalue()
for rank in self.replica_set:
if rank == self.comm.rank:
continue
self.comm.send_obj(data, rank)
# Get the loaded target from master
else:
data = self.comm.recv_obj(self.master)
if data is None:
return
load_npz(io.BytesIO(data), target)
def on_error(self, trainer, e, t):
if self.is_master:
self.snapshot.on_error(trainer, e, t)
def __call__(self, trainer):
if self.is_master:
self.snapshot(trainer)
def finalize(self):
if self.is_master:
self.snapshot.finalize()
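# Worked examples (added for illustration; the values follow from
# _parse_replica_sets above and match the table in the docstring):
#
#     _parse_replica_sets([0], 4)      -> [{0}, {1, 2, 3}]
#     _parse_replica_sets([0, 1], 4)   -> [{0}, {1}, {2, 3}]
#     _parse_replica_sets([], 4)       -> [{0, 1, 2, 3}]
#
# The master of each set is its minimum rank, so with replica_sets=[0] the
# snapshot files are written by ranks 0 and 1.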
| 7,435
| 33.910798
| 79
|
py
|
chainer
|
chainer-master/chainermn/extensions/observation_aggregator.py
|
from __future__ import division
from chainer.training import extension, util
from chainer import Variable
import chainerx as chx
class ObservationAggregator(extension.Extension):
"""Trainer extension to aggregate an observation in the trainer.
Args:
comm: ChainerMN communicator
original_key (str): Key of the observation to be summarized.
If the observation is a :class:`chainer.Variable`, its value
is automatically copied to CPU.
aggregated_key (str): Name of the key after the summarization.
If not specified, it is set to `original_key` to overwrite it.
comm_trigger: Trigger that decides the timing to communicate
observation values for aggregation.
        aggregator (function): Function to compute the aggregated value
            from individual values. It takes a list of lists of observed
            values: one list per process, each containing all the values
            observed on that process since the last communication.
"""
trigger = 1, 'iteration'
priority = extension.PRIORITY_EDITOR
name = None
def __init__(self, comm, original_key, aggregated_key=None,
*, comm_trigger=(1, 'iteration'), aggregator=None):
self.comm = comm
self.original_key = original_key
if aggregated_key is None:
self.aggregated_key = original_key
else:
self.aggregated_key = aggregated_key
self.comm_trigger = util.get_trigger(comm_trigger)
self.observation_history = []
self.aggregator = aggregator or _average_2d
def compute_summary(self, trainer):
if self.original_key in trainer.observation:
value = trainer.observation[self.original_key]
if isinstance(value, Variable):
                # move to the native device, as a ChainerX array cannot
                # be converted to numpy directly, which is what `to_cpu()`
                # would do
value.to_device("native")
elif isinstance(value, chx.ndarray) and \
not value.device.name.startswith('native'):
raise ValueError("observation aggregator does not support "
"ChainerX ndarray on CUDA device.")
self.observation_history.append(value)
if not self.comm_trigger(trainer):
return None
observation_history_gathered = self.comm.gather_obj(
self.observation_history)
self.observation_history = []
if self.comm.rank == 0:
global_summary = self.aggregator(observation_history_gathered)
self.comm.bcast_obj(global_summary)
else:
global_summary = self.comm.bcast_obj(None)
return global_summary
def __call__(self, trainer):
summary = self.compute_summary(trainer)
if summary is not None:
trainer.observation[self.aggregated_key] = summary
def _average_2d(xs):
xs = sum(xs, [])
return sum(xs) / len(xs)
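# Illustrative usage sketch (added; ``comm`` and ``trainer`` are assumed to
# already exist)::
#
#     aggregator = ObservationAggregator(
#         comm, 'main/loss', aggregated_key='main/loss_aggregated',
#         comm_trigger=(1, 'epoch'))
#     trainer.extend(aggregator, trigger=(1, 'iteration'))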
| 2,974
| 36.1875
| 79
|
py
|
chainer
|
chainer-master/chainermn/extensions/__init__.py
|
from chainermn.extensions.allreduce_persistent import AllreducePersistent # NOQA
from chainermn.extensions.checkpoint import create_multi_node_checkpointer # NOQA
from chainermn.extensions.multi_node_evaluator import create_multi_node_evaluator # NOQA
from chainermn.extensions.multi_node_evaluator import GenericMultiNodeEvaluator # NOQA
from chainermn.extensions._multi_node_snapshot import multi_node_snapshot # NOQA
from chainermn.extensions.observation_aggregator import ObservationAggregator # NOQA
from chainermn.extensions.multi_node_early_stopping_trigger import MultiNodeEarlyStoppingTrigger # NOQA
| 616
| 76.125
| 104
|
py
|
chainer
|
chainer-master/chainermn/iterators/synchronized_iterator.py
|
import chainer
import numpy
class _SynchronizedIterator(chainer.dataset.iterator.Iterator):
def __init__(self, actual_iterator, communicator):
if not hasattr(actual_iterator, 'order_sampler'):
raise ValueError('actual_iterator must have order_sampler')
else:
super(_SynchronizedIterator, self).__setattr__(
'actual_iterator', actual_iterator)
# Synchronize random seed.
self.communicator = communicator
if self.communicator.rank == 0:
seed = numpy.random.randint(0, 2 ** 32 - 1)
else:
seed = None
seed = self.communicator.bcast_obj(seed, root=0)
# Random number generator for iterator.
rng = numpy.random.RandomState(seed)
self.actual_iterator.order_sampler = \
chainer.iterators.ShuffleOrderSampler(rng)
self.actual_iterator.reset()
def __getattr__(self, attr_name):
return getattr(self.actual_iterator, attr_name)
def __setattr__(self, attr_name, value):
setattr(self.actual_iterator, attr_name, value)
def __next__(self):
return self.actual_iterator.__next__()
def serialize(self, serializer):
self.actual_iterator.serialize(serializer)
def create_synchronized_iterator(actual_iterator, communicator):
"""Create a synchronized iterator from a Chainer iterator.
    This iterator shares the same batches on multiple processes,
    using the same random number generator so that the batch shuffling
    order stays the same on every process.
    Here is an example situation.
    When we train a sequence-to-sequence model, where the encoder and
    the decoder are located on two different processes, we want to share
    the same batches on each process so that the inputs for the encoder
    and the teacher signals for the decoder stay consistent.
    In order to use the synchronized iterator, first create the iterator
    from a Chainer iterator and a ChainerMN communicator::
iterator = chainermn.iterators.create_synchronized_iterator(
chainer.iterators.SerialIterator(
dataset, batch_size, shuffle=True),
communicator)
    Then you can use it as an ordinary Chainer iterator::
updater = chainer.training.StandardUpdater(iterator, optimizer)
trainer = training.Trainer(updater)
trainer.run()
The resulting iterator shares the same shuffling order among processes
in the specified communicator.
Args:
actual_iterator: Chainer iterator
(e.g., ``chainer.iterators.SerialIterator``).
communicator: ChainerMN communicator.
Returns:
The synchronized iterator based on ``actual_iterator``.
"""
chainer.utils.experimental(
'chainermn.iterators.create_synchronized_iterator')
return _SynchronizedIterator(actual_iterator, communicator)
| 2,898
| 33.927711
| 74
|
py
|
chainer
|
chainer-master/chainermn/iterators/__init__.py
|
from chainermn.iterators.multi_node_iterator import create_multi_node_iterator # NOQA
from chainermn.iterators.synchronized_iterator import create_synchronized_iterator # NOQA
| 178
| 58.666667
| 90
|
py
|
chainer
|
chainer-master/chainermn/iterators/multi_node_iterator.py
|
import chainer
import numpy
def _is_valid_type(element):
if isinstance(element, tuple) and len(element) == 2 \
and hasattr(element[0], 'dtype') \
and hasattr(element[1], 'dtype'):
return True
elif hasattr(element, 'dtype'):
return True
return False
def _build_ctrl_msg(stop, is_valid_data_type, is_paired_dataset, is_new_epoch,
current_position):
ctrl_msg = numpy.ones((5,)) * [int(stop), int(is_valid_data_type),
int(is_paired_dataset), int(is_new_epoch),
int(current_position)]
return ctrl_msg.astype(numpy.float32)
def _parse_ctrl_msg(msg):
stop = bool(msg[0])
is_valid_data_type = bool(msg[1])
is_paired_dataset = bool(msg[2])
is_new_epoch = bool(msg[3])
current_position = int(msg[4])
return stop, is_valid_data_type, is_paired_dataset, is_new_epoch,\
current_position
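# Worked example (added for illustration; the values follow from the two
# helpers above): _build_ctrl_msg(False, True, False, True, 42) produces the
# float32 array [0., 1., 0., 1., 42.], and _parse_ctrl_msg() on that array
# returns (False, True, False, True, 42).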
class _MultiNodeIteratorMaster(chainer.dataset.iterator.Iterator):
def __init__(self, actual_iterator, communicator, rank_master):
super(_MultiNodeIteratorMaster, self).__setattr__(
'communicator', communicator)
super(_MultiNodeIteratorMaster, self).__setattr__(
'actual_iterator', actual_iterator)
super(_MultiNodeIteratorMaster, self).__setattr__(
'rank_master', rank_master)
_dataset_size = numpy.ones((1, )).astype(numpy.float32) \
* len(self.actual_iterator.dataset)
# TODO(tsutsumi): potential deadlock?
self.communicator.bcast(_dataset_size, root=self.rank_master)
if self.actual_iterator._state.order is not None:
self.communicator.bcast(
self.actual_iterator._state.order.astype(numpy.float32),
root=self.rank_master)
else:
# Without shuffle, order is None.
self.communicator.bcast(
-numpy.ones((1, )).astype(numpy.float32),
root=self.rank_master)
def __next__(self):
try:
batch = self.actual_iterator.__next__()
first_elem = batch[0]
is_valid_data_type = _is_valid_type(first_elem)
is_paired_dataset = isinstance(batch, list) \
and isinstance(first_elem, tuple) and len(first_elem) == 2
stop = False
except StopIteration:
stop = True
is_valid_data_type = False
is_paired_dataset = False
is_new_epoch = self.actual_iterator.is_new_epoch
ctrl_msg = _build_ctrl_msg(stop, is_valid_data_type, is_paired_dataset,
is_new_epoch,
self.actual_iterator.current_position)
self.communicator.bcast(ctrl_msg, root=self.rank_master)
if stop:
raise StopIteration
elif not is_valid_data_type:
raise TypeError('Multi node iterator supports ndarray '
'or tuple of scalars as the data type '
'of the batch element only.')
if is_paired_dataset:
_xs, _ys = zip(*batch)
xs = numpy.asarray(_xs, dtype=numpy.float32)
ys = numpy.asarray(_ys, dtype=numpy.float32)
self.communicator.bcast(xs, root=self.rank_master)
self.communicator.bcast(ys, root=self.rank_master)
return batch
else:
if isinstance(batch, list):
batch = numpy.array(batch)
batch = self.communicator.bcast(batch, root=self.rank_master)
return batch.tolist()
next = __next__
def __getattr__(self, attr_name):
return getattr(self.actual_iterator, attr_name)
    def __setattr__(self, attr_name, value):
setattr(self.actual_iterator, attr_name, value)
@property
def current_position(self):
return self.actual_iterator.current_position
@property
def epoch_detail(self):
return self.actual_iterator.epoch_detail
@property
def is_new_epoch(self):
return self.actual_iterator.is_new_epoch
def serialize(self, serializer):
# Master's and Slave's serialize must be called at the same time.
self.actual_iterator.serialize(serializer)
self.communicator.bcast_obj(
serializer, root=self.rank_master)
class _MultiNodeIteratorSlave(chainer.dataset.iterator.Iterator):
def __init__(self, communicator, rank_master):
super(_MultiNodeIteratorSlave, self).__init__()
self.communicator = communicator
self.rank_master = rank_master
# Compatibility to Chainer iterators.
self.epoch = 0
self.current_position = 0
self.is_new_epoch = False
# TODO(tsutsumi): potential deadlock?
_size = self.communicator.bcast(None, root=self.rank_master)
self.dataset_size = int(_size)
self._order = self.communicator.bcast(None, root=self.rank_master)
self._order = self._order.astype(numpy.int64)
if self._order[0] == -1:
self._order = None
def __next__(self):
# Check if master iterator received stop signal.
ctrl_msg = self.communicator.bcast(None, root=self.rank_master)
stop, is_valid_data_type, is_paired_dataset, self.is_new_epoch, \
self.current_position = _parse_ctrl_msg(ctrl_msg)
if self.is_new_epoch:
self.epoch += 1
if stop:
raise StopIteration
elif not is_valid_data_type:
raise TypeError('Multi node iterator supports ndarray '
'or tuple of scalars as the data type '
'of the batch element only.')
if is_paired_dataset:
xs = self.communicator.bcast(None, root=self.rank_master)
ys = self.communicator.bcast(None, root=self.rank_master)
return list(zip(xs, ys.astype(numpy.int32)))
else:
batch = self.communicator.bcast(None, root=self.rank_master)
return batch.tolist()
@property
def epoch_detail(self):
return self.epoch + 1. * self.current_position / self.dataset_size
def serialize(self, serializer):
# Master's and Slave's serialize must be called at the same time.
_serializer = self.communicator.bcast_obj(
None, root=self.rank_master)
self.current_position = serializer(
'current_position',
_serializer('current_position', self.current_position)
)
self.epoch = serializer('epoch', _serializer('epoch', self.epoch))
self.is_new_epoch = serializer(
'is_new_epoch',
_serializer('is_new_epoch', self.is_new_epoch)
)
try:
self._order = serializer(
'order',
_serializer('order', self._order)
)
except KeyError:
pass
def create_multi_node_iterator(
actual_iterator, communicator, rank_master=0):
"""Create a multi node iterator from a Chainer iterator.
    This iterator shares the same batches on multiple processes, simply
    broadcasting batches from the master process to slave processes
    in each iteration.
    The master process obtains batches from ``actual_iterator``, which can
    be any Chainer iterator (e.g. ``chainer.iterators.SerialIterator``).
    Here is an example situation. When we train a sequence-to-sequence model,
    where the encoder and the decoder are located on two different processes,
    we want to share the same batches on each process so that the inputs for
    the encoder and the teacher signals for the decoder stay consistent.
    In order to use the multi node iterator, first create the iterator
    from a Chainer iterator and a ChainerMN communicator::
iterator = chainermn.iterators.create_multi_node_iterator(
chainer.iterators.SerialIterator(
dataset, batch_size, shuffle=True),
communicator)
    Then you can use it as an ordinary Chainer iterator::
updater = chainer.training.StandardUpdater(iterator, optimizer)
trainer = training.Trainer(updater)
trainer.run()
    Since this iterator shares batches over the network in each iteration,
    the communication cost might be large. If you train your model-parallel
    network on an extremely large dataset, you can also consider using
    ``chainermn.iterators.create_synchronized_iterator``.
    The current multi node iterator supports ``numpy.float32``, or a tuple
    of ``numpy.float32``, as the data type of the batch elements.
    .. note:: ``create_multi_node_iterator`` and ``serialize`` of the created
        iterators must be called at the same time by the master and slaves,
        otherwise it falls into a deadlock, because they synchronize the
        internal states of the iterators.
Args:
actual_iterator: Chainer iterator
(``chainer.iterators.SerialIterator`` and
``chainer.iterators.MultiprocessIterator`` are supported).
communicator: ChainerMN communicator.
rank_master: process rank to be master.
Returns:
The master-slave iterator based on ``actual_iterator``.
"""
chainer.utils.experimental(
'chainermn.iterators.create_multi_node_iterator')
if communicator.rank == rank_master:
return _MultiNodeIteratorMaster(
actual_iterator, communicator, rank_master)
else:
return _MultiNodeIteratorSlave(communicator, rank_master)
| 9,602
| 36.956522
| 79
|
py
|
chainer
|
chainer-master/chainermn/communicators/non_cuda_aware_communicator.py
|
import warnings
import chainer.cuda
import chainerx
import math
import mpi4py.MPI
import numpy as np
from chainermn.communicators import _communication_utility
from chainermn.communicators import _memory_utility
from chainermn.communicators import mpi_communicator_base
from chainermn import nccl
class NonCudaAwareCommunicator(mpi_communicator_base.MpiCommunicatorBase):
def __init__(self, mpi_comm):
super(NonCudaAwareCommunicator, self).__init__(mpi_comm)
if not nccl._available:
raise RuntimeError(
'NCCL is not available. '
'Please confirm that NCCL is enabled in CuPy.'
)
if nccl.get_version() < 2302:
warnings.warn('NCCL 2.2 and older versions are deprecated.',
DeprecationWarning)
# We have to delay the initialization of communicators. This is because
# NCCL's communicators use the current CUDA devices at the time of
# initialization. Therefore, we have to initialize NCCL communicators
# after users set the devices to use.
self.inter_mpi_comm = None
self.intra_nccl_comm = None
self.gpu_buffer_a = _memory_utility.DeviceMemory()
self.gpu_buffer_b = _memory_utility.DeviceMemory()
self.cpu_buffer_a = _memory_utility.HostPinnedMemory()
self.cpu_buffer_b = _memory_utility.HostPinnedMemory()
def finalize(self):
super(NonCudaAwareCommunicator, self).finalize()
if self.intra_nccl_comm is not None:
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.barrier()
self.intra_nccl_comm.destroy()
self.intra_nccl_comm = None
def _init_comms(self):
if self.inter_mpi_comm is not None:
assert self.intra_nccl_comm is not None
return
intra_mpi_comm = _communication_utility.init_intra_mpi_comm(
self.mpi_comm, self.intra_rank, self.inter_rank)
self.inter_mpi_comm = _communication_utility.init_inter_mpi_comm(
self.mpi_comm, self.intra_rank, self.inter_rank)
self.intra_nccl_comm = _communication_utility.init_nccl_comm(
intra_mpi_comm)
def bcast_data(self, model):
for _, param in sorted(model.namedparams()):
if param.data is not None:
data = param.data
tmp_cpu = chainer.cuda.to_cpu(data)
is_float16 = tmp_cpu.dtype == np.float16
if is_float16:
tmp_cpu = tmp_cpu.astype(np.float32)
self.mpi_comm.Bcast(tmp_cpu)
if is_float16:
tmp_cpu = tmp_cpu.astype(np.float16)
xp = chainer.backend.get_array_module(data)
if xp == chainerx:
# create the chainerx ndarray
tmp_gpu = chainerx.array(tmp_cpu, device=data.device)
else:
tmp_gpu = chainer.cuda.to_gpu(tmp_cpu)
data[:] = tmp_gpu
def multi_node_mean_grad(self, model, zero_fill=False):
self._init_comms()
stream = chainer.cuda.Stream.null
params = _memory_utility.extract_params_set_grad(model, zero_fill)
itemsize = 4
n_elems_total = _memory_utility.count_grad_elements(params,
zero_fill)
n_elems_per_node = int(math.ceil(n_elems_total / self.inter_size))
n_elems_buffer = n_elems_per_node * self.inter_size
n_bytes_per_node = n_elems_per_node * itemsize
n_bytes_buffer = n_bytes_per_node * self.inter_size
self.gpu_buffer_a.assign(n_bytes_buffer)
self.gpu_buffer_b.assign(n_bytes_buffer)
allreduce_grad_dtype = np.float32
self._pack_params_to_buffer(params, 'grad', buffer=self.gpu_buffer_a,
allreduce_grad_dtype=allreduce_grad_dtype,
zero_fill=zero_fill)
if chainer.is_debug():
stream.synchronize()
array_a = self.gpu_buffer_a.array(n_elems_total)
array_b = self.gpu_buffer_b.array(n_elems_total)
self._check_ready_to_allreduce(array_a, array_b)
# Intra-node reduce
self.intra_nccl_comm.reduce(
self.gpu_buffer_a.ptr(), self.gpu_buffer_b.ptr(), n_elems_total,
nccl.NCCL_FLOAT, nccl.NCCL_SUM, 0, stream.ptr)
# Inter-node allreduce
if self.intra_rank == 0:
self.cpu_buffer_a.assign(n_bytes_buffer)
self.cpu_buffer_b.assign(n_bytes_buffer)
arr_b = self.gpu_buffer_b.array(n_elems_buffer)
arr_b.data.copy_to_host(self.cpu_buffer_b.ptr(), n_bytes_buffer)
self.inter_mpi_comm.Alltoall(
[self.cpu_buffer_b.buffer(n_bytes_buffer), mpi4py.MPI.FLOAT],
[self.cpu_buffer_a.buffer(n_bytes_buffer), mpi4py.MPI.FLOAT])
# Reduction in GPU
arr_a = self.gpu_buffer_a.array(n_elems_buffer)
arr_a.data.copy_from_host(self.cpu_buffer_a.ptr(), n_bytes_buffer)
arr_a = arr_a.reshape(self.inter_size, n_elems_per_node)
arr_a = arr_a.sum(axis=0)
arr_a *= 1.0 / self.size
arr_a.data.copy_to_host(self.cpu_buffer_a.ptr(), n_bytes_per_node)
self.inter_mpi_comm.Allgather(
[self.cpu_buffer_a.buffer(n_bytes_per_node), mpi4py.MPI.FLOAT],
[self.cpu_buffer_b.buffer(n_bytes_buffer), mpi4py.MPI.FLOAT])
arr_b.data.copy_from_host(self.cpu_buffer_b.ptr(), n_bytes_buffer)
# Intra-node bcast
self.intra_nccl_comm.bcast(
self.gpu_buffer_b.ptr(), n_elems_total, nccl.NCCL_FLOAT, 0,
stream.ptr)
if chainer.is_debug():
stream.synchronize()
self._ensure_all_finite(self.gpu_buffer_b.array(n_elems_total))
self._unpack_params_from_buffer(params, 'grad', self.gpu_buffer_b,
allreduce_grad_dtype, zero_fill)
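# Worked example (added for illustration; the numbers follow from the buffer
# sizing above): with n_elems_total=10, inter_size=4 and itemsize=4 (float32),
# n_elems_per_node = ceil(10 / 4) = 3, n_elems_buffer = 12 and
# n_bytes_buffer = 48, i.e. the GPU/CPU buffers carry 2 padding elements so
# that the inter-node Alltoall/Allgather exchange equally sized chunks.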
| 6,101
| 38.882353
| 79
|
py
|
chainer
|
chainer-master/chainermn/communicators/pure_nccl_communicator.py
|
import warnings
import chainer.cuda
from chainermn.communicators import _communication_utility
from chainermn.communicators import _memory_utility
from chainermn.communicators import mpi_communicator_base
from chainermn import nccl
import numpy as np
class PureNcclCommunicator(mpi_communicator_base.MpiCommunicatorBase):
def __init__(self, mpi_comm):
super(PureNcclCommunicator, self).__init__(mpi_comm)
if not nccl._available:
raise RuntimeError(
'PureNcclCommunicator requires NCCL 2.0+, '
'but NCCL is not available.')
if nccl.get_build_version() < 2000:
raise RuntimeError(
'PureNcclCommunicator requires NCCL 2.0+, '
'but found {}.'.format(nccl.get_build_version()))
if nccl.get_version() < 2302:
warnings.warn('NCCL 2.2 and older versions are deprecated.',
DeprecationWarning)
# We have to delay the initialization of communicators. This is because
# NCCL's communicators use the current CUDA devices at the time of
# initialization. Therefore, we have to initialize NCCL communicators
# after users set the devices to use.
self.nccl_comm = None
self.gpu_tmp_buffer = _memory_utility.DeviceMemory()
self.gpu_buffer_a = _memory_utility.DeviceMemory()
self.gpu_buffer_b = _memory_utility.DeviceMemory()
with self.config_scope():
self.allreduce_grad_dtype = None
self.grad_dtype_to_allreduce_dtype_kernel = None
self.allreduce_dtype_to_grad_dtype_kernel = None
self.params_data = None
def finalize(self):
super(PureNcclCommunicator, self).finalize()
if self.nccl_comm is not None:
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.barrier()
self.nccl_comm.destroy()
self.nccl_comm = None
def _init_comms(self):
if self.nccl_comm is not None:
return
self.nccl_comm = _communication_utility.init_nccl_comm(self.mpi_comm)
def set_config(self, name, value=True, **kwargs):
if name == 'allreduce_grad_dtype':
if value is not None:
allreduce_grad_dtype = np.dtype(value)
if allreduce_grad_dtype.kind != 'f':
                    raise ValueError(
                        'allreduce_grad_dtype must be '
                        'numpy.float16, numpy.float32, '
                        'numpy.float64, or None.')
else:
allreduce_grad_dtype = None
with self.config_scope():
self.allreduce_grad_dtype = allreduce_grad_dtype
else:
super(PureNcclCommunicator, self).set_config(name, **kwargs)
def get_config(self, name=None):
if name == 'allreduce_grad_dtype':
return self.allreduce_grad_dtype
else:
return super(PureNcclCommunicator, self).get_config(name)
def bcast_data(self, model):
self._init_comms()
params = _memory_utility.extract_params_set_data(model)
data_dtype = chainer.get_dtype()
n_elems = sum(param.data.size for param in params)
data_grad_n_bytes = data_dtype.itemsize * n_elems
if self.gpu_tmp_buffer.size != data_grad_n_bytes:
self.gpu_tmp_buffer.assign(data_grad_n_bytes)
stream = chainer.cuda.Stream.null
_memory_utility.pack_params(
params, 'data', self.gpu_tmp_buffer, data_dtype, False, stream)
self.nccl_comm.bcast(self.gpu_tmp_buffer.ptr(), n_elems,
_communication_utility._get_nccl_type_id(
data_dtype),
0, stream.ptr)
_memory_utility.unpack_params(
params, 'data', self.gpu_tmp_buffer, data_dtype, False, stream)
def multi_node_mean_grad(self, model, zero_fill=False):
stream = chainer.cuda.Stream.null
self._multi_node_mean_grad_async(model, zero_fill, stream)
def _multi_node_mean_grad_async(self, model, zero_fill, stream):
self._init_comms()
params = _memory_utility.extract_params_set_grad(model, zero_fill)
# NOTE: we need to explicitly check `is None` , because
# numpy's dtype object is evaluated to False in numpy <= 1.12.1
if self.allreduce_grad_dtype is not None:
allreduce_grad_dtype = self.allreduce_grad_dtype
else:
allreduce_grad_dtype = chainer.get_dtype()
assert allreduce_grad_dtype is not None
n_elems = _memory_utility.count_grad_elements(params,
zero_fill)
needs_sync = self._prepare_allreduce_pack_buffer(allreduce_grad_dtype,
n_elems)
if stream != chainer.cuda.Stream.null and needs_sync:
chainer.cuda.Stream.null.synchronize()
# pack grads from params -> buffer A
self._pack_params_to_buffer(params, 'grad', self.gpu_buffer_a,
allreduce_grad_dtype,
zero_fill, stream)
# Allreduce from buffer A -> buffer B
# div by comm_size from buffer B -> buffer A
self._multi_node_mean_nccl(self.gpu_buffer_a, self.gpu_buffer_b,
n_elems,
allreduce_grad_dtype, stream)
# unpack params from buffer A -> params
self._unpack_params_from_buffer(params, 'grad', self.gpu_buffer_b,
allreduce_grad_dtype,
zero_fill, stream)
def _prepare_allreduce_pack_buffer(self, allreduce_grad_dtype, n_elems):
allreduce_grad_n_bytes = allreduce_grad_dtype.itemsize * n_elems
needs_sync = False
if self.gpu_buffer_a.size != allreduce_grad_n_bytes:
self.gpu_buffer_a.assign(allreduce_grad_n_bytes)
needs_sync = True
if self.gpu_buffer_b.size != allreduce_grad_n_bytes:
self.gpu_buffer_b.assign(allreduce_grad_n_bytes)
needs_sync = True
return needs_sync
def _multi_node_mean_nccl(self, sendbuf, recvbuf,
n_elems, dtype, stream=None):
"""Compute mean of each element on each processes with NCCL.
The function compute mean of each element in ``sendbuf`` on each
processes. The result is stored in ``recvbuf``. NCCL is used for
communication.
Args:
sendbuf (numpy/cupy array): Input arrays.
recvbuf (numpy/cupy array): Output arrays.
n_elems (int): the number of elements in `sendbuf`.
dtype: Data type of elements used in All-Reduce.
stream: CUDA stream used for All-Reduce.
"""
        if chainer.is_debug():
            stream.synchronize()
            array_a = sendbuf.array(n_elems, dtype=dtype)
            array_b = recvbuf.array(n_elems, dtype=dtype)
            self._check_ready_to_allreduce(array_a, array_b)
if stream is None:
stream = chainer.cuda.Stream.null
self._init_comms()
type_id = _communication_utility._get_nccl_type_id(dtype)
self.nccl_comm.allReduce(sendbuf.ptr(),
recvbuf.ptr(), n_elems,
type_id, nccl.NCCL_SUM, stream.ptr)
div_by_size = chainer.cuda.elementwise(
'',
'{} x'.format(dtype.name),
'x *= (1.0/{})'.format(self.size), 'div_by_size')
div_by_size(
recvbuf.array(n_elems, dtype=dtype),
stream=stream)
if chainer.is_debug():
stream.synchronize()
self._ensure_all_finite(recvbuf.array(n_elems, dtype=dtype))
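# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal sketch, assuming MPI, CUDA, CuPy and NCCL (>= 2) are available;
# the model and its sizes below are illustrative only.  Run with e.g.
#   mpiexec -n 2 python pure_nccl_communicator.py
if __name__ == '__main__':
    import chainer
    import chainer.links as L
    import chainermn
    comm = chainermn.create_communicator('pure_nccl')
    chainer.cuda.get_device_from_id(comm.intra_rank).use()
    model = L.Linear(3, 2)
    model.to_gpu()
    # Broadcast initial parameters from rank 0, then average gradients.
    comm.bcast_data(model)
    model.cleargrads()
    comm.multi_node_mean_grad(model, zero_fill=True)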
| 7,919 | 39.824742 | 79 | py |
| chainer | chainer-master/chainermn/communicators/naive_communicator.py |
from chainermn.communicators import _memory_utility
from chainermn.communicators import mpi_communicator_base
class NaiveCommunicator(mpi_communicator_base.MpiCommunicatorBase):
def __init__(self, mpi_comm):
super(NaiveCommunicator, self).__init__(mpi_comm)
def multi_node_mean_grad(self, model, zero_fill=False):
params = _memory_utility.extract_params_set_grad(model, zero_fill)
for param in params:
if zero_fill and param.grad is None:
if param.data is None:
continue
param.grad = param.xp.zeros_like(param.data)
self._multi_node_mean(None, param.grad)
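# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal CPU-only sketch, assuming mpi4py is installed; the model below is
# illustrative.  Run with e.g. `mpiexec -n 2 python naive_communicator.py`.
if __name__ == '__main__':
    import chainer.links as L
    import mpi4py.MPI
    comm = NaiveCommunicator(mpi4py.MPI.COMM_WORLD)
    model = L.Linear(3, 2)
    model.cleargrads()
    # Gradients that are still None are zero-filled before the all-reduce.
    comm.multi_node_mean_grad(model, zero_fill=True)
    print(comm.rank, model.W.grad.sum())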
| 668 | 36.166667 | 74 | py |
| chainer | chainer-master/chainermn/communicators/dummy_communicator.py |
from chainermn.communicators import _memory_utility
from chainermn.communicators import mpi_communicator_base
import numpy as np
class DummyCommunicator(mpi_communicator_base.MpiCommunicatorBase):
"""Dummy communicator that does not communicate at all.
This class is intended to measure the overhead of packing and unpacking.
This class does not pass the tests.
"""
def __init__(self, mpi_comm):
super(DummyCommunicator, self).__init__(mpi_comm)
self.gpu_buffer_a = _memory_utility.DeviceMemory()
def multi_node_mean_grad(self, model, zero_fill=False):
params = _memory_utility.extract_params_set_grad(model, zero_fill)
itemsize = 4
n_elems_total = _memory_utility.count_grad_elements(params,
zero_fill)
n_bytes_total = n_elems_total * itemsize
self.gpu_buffer_a.assign(n_bytes_total)
self._pack_params_to_buffer(params, 'grad',
buffer=self.gpu_buffer_a,
allreduce_grad_dtype=np.float32,
zero_fill=zero_fill)
self._unpack_params_from_buffer(params, 'grad',
buffer=self.gpu_buffer_a,
allreduce_grad_dtype=np.float32,
zero_fill=zero_fill)
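# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal sketch, assuming CUDA/CuPy and mpi4py are available, that uses
# the dummy communicator to time only the pack/unpack overhead (there is no
# actual communication).  The model size below is illustrative.
if __name__ == '__main__':
    import time
    import chainer.links as L
    import mpi4py.MPI
    comm = DummyCommunicator(mpi4py.MPI.COMM_WORLD)
    model = L.Linear(1000, 1000)
    model.to_gpu()
    model.cleargrads()
    start = time.time()
    comm.multi_node_mean_grad(model, zero_fill=True)
    print('pack/unpack overhead: {:.6f} sec'.format(time.time() - start))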
| 1,430 | 38.75 | 76 | py |
| chainer | chainer-master/chainermn/communicators/_communication_utility.py |
from chainermn import nccl
import collections
import numpy as np
import pickle
import mpi4py.MPI
def init_ranks(mpi_comm):
"""Returns rank information of the local process in `mpi_comm`.
Args:
        mpi_comm (mpi4py.MPI.Comm):
            MPI communicator from mpi4py
Returns:
rank_info (list):
Elements are:
* rank (`mpi_comm.rank`)
* intra_rank (rank within the local computing node)
* intra_size (number of processes on the node)
* inter_rank (rank of the node)
* inter_size (number of computing nodes)
"""
global_names = mpi_comm.gather(mpi4py.MPI.Get_processor_name())
if mpi_comm.rank == 0:
name_to_global_ranks = collections.defaultdict(list)
for global_rank, name in enumerate(global_names):
name_to_global_ranks[name].append(global_rank)
for global_ranks in name_to_global_ranks.values():
global_ranks.sort()
inter_names = sorted(
set(global_names), key=lambda name: name_to_global_ranks[name])
name_to_inter_rank = {
name: inter_rank
for inter_rank, name in enumerate(inter_names)
}
inter_size = len(inter_names)
all_ranks = []
for global_rank, name in enumerate(global_names):
ranks = name_to_global_ranks[name]
intra_rank = ranks.index(global_rank)
intra_size = len(ranks)
inter_rank = name_to_inter_rank[name]
all_ranks.append((
global_rank, intra_rank, intra_size,
inter_rank, inter_size))
my_ranks = mpi_comm.scatter(all_ranks)
else:
my_ranks = mpi_comm.scatter(None)
assert my_ranks[0] == mpi_comm.rank
return my_ranks
def init_intra_mpi_comm(mpi_comm, intra_rank, inter_rank):
return mpi_comm.Split(inter_rank, intra_rank)
def init_inter_mpi_comm(mpi_comm, intra_rank, inter_rank):
return mpi_comm.Split(intra_rank, inter_rank)
def init_nccl_comm(mpi_comm):
from chainermn import nccl
if mpi_comm.rank == 0:
nccl_comm_id = nccl.get_unique_id()
else:
nccl_comm_id = None
nccl_comm_id = mpi_comm.bcast(nccl_comm_id)
return nccl.NcclCommunicator(mpi_comm.size, nccl_comm_id, mpi_comm.rank)
def inter_allreduce_gpu(
inter_mpi_comm, size, gpu_buffer_a, gpu_buffer_b,
n_bytes_buffer, n_elems_per_node, n_bytes_per_node, cuda_stream):
inter_size = inter_mpi_comm.size
# Exchange all data to get own region data (bufferB -> bufferA)
cuda_stream.synchronize()
inter_mpi_comm.Alltoall(
[gpu_buffer_b.buffer(n_bytes_buffer), mpi4py.MPI.FLOAT],
[gpu_buffer_a.buffer(n_bytes_buffer), mpi4py.MPI.FLOAT])
# Reduce own region data (inplace bufferA) and averaging
ret = gpu_buffer_a.array(inter_size * n_elems_per_node) \
.reshape(inter_size, n_elems_per_node) \
.sum(axis=0) * (1.0 / size)
# Gather others' region data (bufferA -> bufferB)
for i in range(0, inter_size):
gpu_buffer_a.from_device(
ret, n_bytes_per_node, i * n_bytes_per_node)
cuda_stream.synchronize()
inter_mpi_comm.Alltoall(
[gpu_buffer_a.buffer(n_bytes_buffer), mpi4py.MPI.FLOAT],
[gpu_buffer_b.buffer(n_bytes_buffer), mpi4py.MPI.FLOAT])
INT_MAX = 2147483647
def chunked_bcast_obj(obj, mpi_comm, max_buf_len=256 * 1024 * 1024,
root=0):
    '''Split an object into max_buf_len-sized chunks and broadcast them.
    mpi4py cannot transfer an object whose pickled size exceeds the signed
    integer maximum (2147483647), so the object is pickled and split into
    chunks before being broadcast.
    An alternative would be to simply call mpi_comm.bcast(obj); the root
    rank would then receive an OverflowError from mpi4py, but ranks > 0
    would block forever busy-waiting at mpi_comm.bcast(obj).
    Args:
        obj: A Python object to be broadcast.
        mpi_comm: MPI4py communicator.
        root (int): The root process of the broadcast operation.
max_buf_len (int): Max buffer size to be used at broadcasting
binaries. Must not be larger than 2147483647 (INT_MAX).
Default value is 256MB.
Returns:
Broadcasted object.
'''
assert max_buf_len < INT_MAX
assert max_buf_len > 0
# check XOR condition of obj is None and rank==0
# rank \ obj | None | not None |
# == 0 | NG | OK |
# > 0 | OK | NG |
assert not (obj is None and mpi_comm.rank == root)
assert not (obj is not None and mpi_comm.rank != root)
if obj is not None and mpi_comm.rank == root:
pickled_bytes = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
else:
pickled_bytes = bytearray()
total_bytes = len(pickled_bytes)
total_chunk_num = total_bytes // max_buf_len
if (total_bytes % max_buf_len) > 0:
total_chunk_num += 1
data = mpi_comm.bcast((total_chunk_num, max_buf_len, total_bytes),
root=root)
assert data is not None
(total_chunk_num, max_buf_len, total_bytes) = data
for i in range(total_chunk_num):
b = i * max_buf_len
e = min(b + max_buf_len, total_bytes)
if mpi_comm.rank == root:
buf = pickled_bytes[b:e]
else:
buf = bytearray(e - b)
mpi_comm.Bcast(buf, root=root)
if mpi_comm.rank != root:
pickled_bytes[b:e] = buf
if mpi_comm.rank != root:
obj = pickle.loads(pickled_bytes)
return obj
def _get_nccl_type_id(dtype):
if dtype == np.float16:
return nccl.NCCL_FLOAT16
elif dtype == np.float32:
return nccl.NCCL_FLOAT32
elif dtype == np.float64:
return nccl.NCCL_FLOAT64
else:
raise ValueError(
'dtype must be float16, float32, or float64.')
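# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal sketch of chunked_bcast_obj, assuming mpi4py is installed; the
# object and buffer size below are illustrative.  Run with e.g.
#   mpiexec -n 2 python _communication_utility.py
if __name__ == '__main__':
    comm = mpi4py.MPI.COMM_WORLD
    obj = {'weights': np.zeros(10)} if comm.rank == 0 else None
    # Non-root ranks must pass None; the pickled object is broadcast in
    # max_buf_len-sized chunks so that it may exceed the 2 GiB MPI limit.
    obj = chunked_bcast_obj(obj, comm, max_buf_len=4 * 1024 * 1024, root=0)
    print(comm.rank, sorted(obj.keys()))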
| 5,957 | 30.860963 | 76 | py |
| chainer | chainer-master/chainermn/communicators/mpi_communicator_base.py |
import mpi4py
import numpy
import chainer
import chainer.backends
import chainer.utils
from chainer.utils import collections_abc
from chainermn.communicators import _communication_utility
from chainermn.communicators._communication_utility import chunked_bcast_obj
from chainermn.communicators import _memory_utility
from chainermn.communicators import communicator_base
import chainerx
_dtype_mpi_type = {
# see the definition of mpi4py.MPI._typedict (in mpi4py/MPI/typemap.pxi)
numpy.dtype(numpy.int32): mpi4py.MPI._typedict['i'],
numpy.dtype(numpy.int64): mpi4py.MPI._typedict['l'],
numpy.dtype(numpy.float16): mpi4py.MPI._typedict['f'],
numpy.dtype(numpy.float32): mpi4py.MPI._typedict['f'],
numpy.dtype(numpy.float64): mpi4py.MPI._typedict['d'],
}
def _check_dtype(caller, msgtype):
dtype = msgtype.dtype
if dtype not in _dtype_mpi_type.keys():
raise TypeError(
'{} does not support dtype {}'.format(caller, dtype))
def _check_dtypes_are_same(msgtypes):
dtypes = [msgtype.dtype for msgtype in msgtypes]
if any(dtypes[0] != dtype for dtype in dtypes):
raise TypeError('all dtypes must be the same')
def _is_numpy_array(array):
return isinstance(array, numpy.ndarray)
def _is_cupy_array(array):
return chainer.backend.get_array_module(array) is not numpy
def _cnt_to_dsp(cnt):
"""Utility to convert length array to cumulative array."""
return [0] + numpy.cumsum(cnt)[:-1].tolist()
def _get_mpi_type(msgtype):
dtype = msgtype.dtype
if dtype not in _dtype_mpi_type.keys():
raise TypeError(
'dtype {} is not supported by MpiCommunicator'.format(dtype))
return _dtype_mpi_type[dtype]
class _MessageType(object):
def __init__(self, obj):
if _is_numpy_array(obj) or _is_cupy_array(obj):
self.is_host = _is_numpy_array(obj)
self.is_tuple = False
self.narr = 1
self.ndims = [obj.ndim]
self.shapes = [obj.shape]
self.dtype = obj.dtype
elif isinstance(obj, collections_abc.Iterable):
if all(map(_is_numpy_array, obj)):
self.is_host = True
elif all(map(_is_cupy_array, obj)):
self.is_host = False
else:
raise ValueError(
'All message objects must be either numpy or cupy arrays.')
self.is_tuple = True
self.narr = len(obj)
self.ndims = [x.ndim for x in obj]
self.shapes = [x.shape for x in obj]
dtypes = [x.dtype for x in obj]
if not all(dtype == dtypes[0] for dtype in dtypes):
raise TypeError(
'Message objects must be the same dtype')
self.dtype = dtypes[0]
else:
raise TypeError(
'Message object must be numpy/cupy array or its tuple.')
def get_array_module(self):
if self.is_host:
return numpy
else:
import cupy
return cupy
class MpiCommunicatorBase(communicator_base.CommunicatorBase):
'''MpiCommunicatorBase
Implementation of communicator interface defined by
    :class:`CommunicatorBase`. This communicator assumes MPI4py and that
    all ChainerMN processes are invoked by the ``mpirun`` (``mpiexec``)
    command. It still lacks several important methods, such as
    ``multi_node_mean_grad``, that must be implemented with a specific
    algorithm; see the hierarchical or pure_nccl communicator for an example.
'''
def __init__(self, mpi_comm):
self.mpi_comm = mpi_comm
self._init_ranks()
with self.config_scope():
self.batched_copy = False
@property
def rank(self):
return self.mpi_comm.rank
@property
def size(self):
return self.mpi_comm.size
@property
def intra_rank(self):
return self._intra_rank
@property
def intra_size(self):
return self._intra_size
@property
def inter_rank(self):
return self._inter_rank
@property
def inter_size(self):
return self._inter_size
def set_config(self, name, value=True, **kwargs):
if name == 'batched_copy':
with self.config_scope():
self.batched_copy = value
else:
            # Delegate unknown configuration names to the parent class.
return super(MpiCommunicatorBase, self).set_config(name, **kwargs)
def get_config(self, name=None):
if name == 'batched_copy':
return self.batched_copy
else:
            # Delegate unknown configuration names to the parent class.
return super(MpiCommunicatorBase, self).get_config(name)
def split(self, color, key):
return self.__class__(mpi_comm=self.mpi_comm.Split(color, key))
def alltoall(self, xs):
"""A primitive of inter-process all-to-all function.
This method tries to invoke all-to-all communication within the
communicator. All processes in the communicator are expected to
invoke ``alltoall()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``xs`` is numpy array, the returned array will also be allocated
as numpy array. Additionally, when ``xs`` is cupy array, the returned
array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
xs (tuple of numpy/cupy array)
Returns:
ys (tuple of numpy/cupy array):
Received arrays. The length of tuple equals to
the communicator size.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.alltoall')
if len(xs) != self.size:
raise ValueError(
'The length of data must be same as communicator size.')
# Type check.
msgtypes = [_MessageType(x) for x in xs]
for msgtype in msgtypes:
_check_dtype('alltoall', msgtype)
_check_dtypes_are_same(msgtypes)
send_msgtype = msgtypes[0]
msgtypes = self.mpi_comm.alltoall(msgtypes)
_check_dtypes_are_same(msgtypes)
recv_msgtype = msgtypes[0]
# Collective communication.
slens = [x.size for x in xs]
xp = chainer.backend.get_array_module(*xs)
sbuf = xp.hstack([x.reshape(-1) for x in xs])
shapes = [msgtype.shapes[0] for msgtype in msgtypes]
rlens = [chainer.utils.size_of_shape(s) for s in shapes]
rbuf = xp.empty([sum(rlens)], dtype=msgtype.dtype)
if xp is not numpy:
sbuf = _memory_utility.get_device_memory_pointer(sbuf)
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Alltoallv(
[sbuf, (slens, _cnt_to_dsp(slens)), _get_mpi_type(send_msgtype)],
[_memory_utility.get_device_memory_pointer(rbuf),
(rlens, _cnt_to_dsp(rlens)), _get_mpi_type(recv_msgtype)])
ys = [rbuf[i:i + l].reshape(s)
for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)]
return tuple(ys)
def send(self, data, dest, tag):
"""A primitive for inter-process transmitter.
This method sends numpy-array to target process.
The target process is expected to invoke ``recv()``.
This method relies on mpi4py fast communication optimized for
        numpy arrays, which discards any information attached to
        chainer.Variable objects. Please be aware of this.
Args:
data: data to be sent (tuple, list or raw numpy/cupy array)
dest (int): Target process specifier.
tag (int): Message ID (MPI feature).
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.send')
msgtype = _MessageType(data)
_check_dtype('send', msgtype)
"""We use ssend() instead of send() to pass unittests.
If we don't use it, an error occurs in
test_point_to_point_communication.py
when using MVAPICH2-2.2 and GPUs.
"""
self.mpi_comm.ssend(msgtype, dest=dest, tag=tag)
# Type check.
if not msgtype.is_tuple:
data = [data]
for array in data:
if numpy.float16 == array.dtype:
array = array.astype(numpy.float32)
if chainer.backend.get_array_module(array) is not numpy:
chainer.cuda.Stream.null.synchronize()
array = (_memory_utility.get_device_memory_pointer(array),
_get_mpi_type(msgtype))
else:
array = numpy.ascontiguousarray(array)
"""We use Ssend() for the same reason as using ssend()."""
self.mpi_comm.Ssend(array, dest=dest, tag=tag)
def recv(self, source, tag):
"""A primitive of inter-process receiver.
This method tries to receive numpy-array from target process.
The target process is expected to invoke ``send()``.
This method relies on mpi4py fast communication optimized for
        numpy arrays, which discards any information attached to
        chainer.Variable objects. Please be aware of this.
If the corresponding ``send()`` is invoked with cupy array,
the returned array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
source (int): Target process specifier.
tag (int): Message ID (MPI feature).
Returns:
data (tuple of numpy/cupy array or numpy/cupy array):
Received data. If ``send()`` is invoked with tuple data,
it is also tuple. Otherwise, it is a vanilla numpy/cupy array.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.recv')
msgtype = self.mpi_comm.recv(source=source, tag=tag)
xp = msgtype.get_array_module()
if numpy.float16 == msgtype.dtype:
comm_dtype = numpy.float32
else:
comm_dtype = msgtype.dtype
if msgtype.is_tuple:
msg = []
for shape in msgtype.shapes:
buf = xp.empty(
[chainer.utils.size_of_shape(shape)], dtype=comm_dtype)
rtype = _get_mpi_type(msgtype)
self.mpi_comm.Recv(
_memory_utility.array_to_buffer_object(buf, rtype),
source=source, tag=tag)
if numpy.float16 == msgtype.dtype:
buf = buf.astype(numpy.float16)
msg.append(buf.reshape(shape))
return tuple(msg)
else:
assert len(msgtype.shapes) == 1
shape = msgtype.shapes[0]
buf = xp.empty([chainer.utils.size_of_shape(shape)],
dtype=comm_dtype)
rtype = _get_mpi_type(msgtype)
self.mpi_comm.Recv(
_memory_utility.array_to_buffer_object(buf, rtype),
source=source, tag=tag)
if numpy.float16 == msgtype.dtype:
buf = buf.astype(numpy.float16)
return buf.reshape(shape)
def bcast(self, x, root=0):
"""A primitive of inter-process broadcast communication.
This method tries to invoke broadcast communication within the
communicator. All processes in the communicator are expected to
        invoke ``bcast()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``bcast()`` is invoked with cupy array in the root process,
the returned array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
x (numpy/cupy array): Array to be broadcasted.
root (int): Rank of root process.
Returns:
ys (tuple of numpy/cupy array): Received arrays.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.bcast')
is_master = self.mpi_comm.rank == root
if is_master:
msgtype = _MessageType(x)
_check_dtype('bcast', msgtype)
if msgtype.is_tuple:
raise TypeError('Tuple data cannot be broadcasted')
msgtype = self.mpi_comm.bcast(msgtype, root)
shape = msgtype.shapes[0]
buf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
self.mpi_comm.Bcast(buf, root)
return x
else:
msgtype = self.mpi_comm.bcast(None, root)
xp = msgtype.get_array_module()
shape = msgtype.shapes[0]
buf = xp.empty(
[chainer.utils.size_of_shape(shape)], dtype=msgtype.dtype)
buftype = _get_mpi_type(msgtype)
self.mpi_comm.Bcast(
_memory_utility.array_to_buffer_object(buf, buftype),
root)
return buf.reshape(shape)
def gather(self, x, root=0):
"""A primitive of inter-process gather communication.
This method tries to invoke gather communication within the
communicator. All processes in the communicator are expected to
invoke ``gather()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``x`` is numpy array, the received data will also be allocated
as numpy array. Additionally, when ``x`` is cupy array, the returned
array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
x (numpy/cupy array): Array to be gathered.
root (int): Rank of root process.
Returns:
ys (tuple of numpy/cupy array):
Received arrays. ``None`` for non-root processes.
"""
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.gather')
is_master = self.mpi_comm.rank == root
msgtype = _MessageType(x)
_check_dtype('gather', msgtype)
msgtypes = self.mpi_comm.gather(msgtype, root)
if is_master:
_check_dtypes_are_same(msgtypes)
for msgtype in msgtypes:
if msgtype.is_tuple:
raise TypeError('gather cannot handle tuple data')
assert len(msgtype.shapes) == 1
xp = chainer.backend.get_array_module(x)
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
shapes = [mty.shapes[0] for mty in msgtypes]
rlens = [chainer.utils.size_of_shape(s) for s in shapes]
rbuf = xp.empty([sum(rlens)], dtype=msgtype.dtype)
if xp is not numpy:
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Gatherv(
sbuf,
[_memory_utility.get_device_memory_pointer(rbuf),
(rlens, _cnt_to_dsp(rlens)), _get_mpi_type(msgtype)],
root)
ys = [rbuf[i:i + l].reshape(s)
for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)]
return tuple(ys)
else:
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
self.mpi_comm.Gatherv(sbuf, None, root)
return None
def allgather(self, x):
chainer.utils.experimental(
'chainermn.communicators.MpiCommunicatorBase.allgather')
msgtype = _MessageType(x)
_check_dtype('allgather', msgtype)
msgtypes = self.mpi_comm.allgather(msgtype)
_check_dtypes_are_same(msgtypes)
# Type check.
for msgtype in msgtypes:
if msgtype.is_tuple:
raise TypeError('allgather cannot handle tuple data')
assert len(msgtype.shapes) == 1
# Collective communication.
xp = chainer.backend.get_array_module(x)
shapes = [msgtype.shapes[0] for msgtype in msgtypes]
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
rlens = [chainer.utils.size_of_shape(s) for s in shapes]
rbuf = xp.empty([sum(rlens)], dtype=msgtype.dtype)
if xp is not numpy:
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Allgatherv(
sbuf,
[_memory_utility.get_device_memory_pointer(rbuf),
(rlens, _cnt_to_dsp(rlens)), _get_mpi_type(msgtype)])
ys = [rbuf[i:i + l].reshape(s)
for i, l, s in zip(_cnt_to_dsp(rlens), rlens, shapes)]
return tuple(ys)
def allreduce(self, x):
"""A primitive of inter-process allreduce communication.
This method tries to invoke allreduce communication within the
communicator. All processes in the communicator are expected to
invoke ``allreduce()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
Note that this method can only handle the same shapes of data
over all processes, and cannot handle tuple data.
If ``x`` is numpy array, the received data will also be allocated
as numpy array. Additionally, when ``x`` is cupy array, the returned
array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
x (numpy/cupy array): An array to apply allreduce operation.
Returns:
ys (numpy/cupy array): An array that allreduce (currently SUM only)
has been applied.
"""
msgtype = _MessageType(x)
_check_dtype('allreduce', msgtype)
if msgtype.is_tuple:
raise TypeError('allreduce cannot handle tuple data')
xp = chainer.backend.get_array_module(x)
# TODO(kuenishi): do we check all messages have same shape and dims?
# Source buffer
sbuf = _memory_utility.array_to_buffer_object(
x, _get_mpi_type(msgtype))
# Destination buffer and its object
shape = msgtype.shapes[0]
dbuf = xp.empty(
[chainer.utils.size_of_shape(shape)], dtype=msgtype.dtype)
dbuf_buffer_obj = _memory_utility.array_to_buffer_object(
dbuf, _get_mpi_type(msgtype))
self.mpi_comm.Allreduce(sbuf, dbuf_buffer_obj)
return dbuf.reshape(shape)
def scatter(self, xs, root=0):
"""A primitive of inter-process scatter communication.
This method tries to invoke scatter communication within the
communicator. All processes in the communicator are expected to
invoke ``scatter()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
If ``xs`` is tuple, each element is send to different processes.
The length of the tuple must be the same as the communicator size.
        If ``xs`` is ``numpy.ndarray``, it is split along the first
        axis and sent to different processes. For slave processes, ``xs``
is allowed to be any value (will be ignored).
If ``scatter()`` is invoked with cupy array in the root process,
the returned array will be placed at current device
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
regardless of which device the argument is placed at remote nodes.
Args:
xs (tuple of numpy/cupy array): Arrays to be scattered.
root (int): Rank of root process.
Returns:
ys (numpy/cupy array): Received arrays.
"""
chainer.utils.experimental(
'chainermn.communicators.CommunicatorBase.scatter')
is_master = self.mpi_comm.rank == root
if is_master:
# Type check.
msgtype = _MessageType(xs)
_check_dtype('scatter', msgtype)
if msgtype.is_tuple:
if len(msgtype.shapes) != self.size:
raise ValueError(
'the length of xs must be consistent '
'with communicator size')
xp = chainer.backend.get_array_module(*xs)
msgtype = tuple([_MessageType(x) for x in xs])
shapes = [mty.shapes[0] for mty in msgtype]
# concatenate([x.reshape(-1) ... ], axis=0) will fail
xs = xp.concatenate([x.reshape(1, -1) for x in xs], axis=1)
else:
assert len(msgtype.shapes) == 1
if msgtype.shapes[0][0] != self.mpi_comm.size:
raise ValueError(
'scatter received inconsistent number of inputs '
'with communicator size')
xp = chainer.backend.get_array_module(xs)
msgtype = tuple([_MessageType(xs[0])
for _ in range(self.size)])
shapes = [xs.shape[1:] for _ in range(self.size)]
msgtype = self.mpi_comm.scatter(msgtype, root)
shape = msgtype.shapes[0]
# Collective communication.
slens = [chainer.utils.size_of_shape(s) for s in shapes]
sbuf = _memory_utility.get_device_memory_pointer(xs)
rbuf = xp.empty(
[chainer.utils.size_of_shape(shape)], dtype=msgtype.dtype)
rtype = _get_mpi_type(msgtype)
if xp is not numpy:
chainer.cuda.Stream.null.synchronize()
self.mpi_comm.Scatterv(
[sbuf, (slens, _cnt_to_dsp(slens)), _get_mpi_type(msgtype)],
_memory_utility.array_to_buffer_object(rbuf, rtype), root)
return rbuf.reshape(shape)
else: # slave processes
msgtypes = self.mpi_comm.scatter(None, root)
xp = msgtypes.get_array_module()
shape = msgtypes.shapes[0]
rbuf = xp.empty(
[chainer.utils.size_of_shape(shape)], dtype=msgtypes.dtype)
rtype = _get_mpi_type(msgtypes)
self.mpi_comm.Scatterv(
None,
_memory_utility.array_to_buffer_object(rbuf, rtype),
root)
return rbuf.reshape(shape)
def _check_obj_type_for_chainerx(self, obj):
# Do NOT support chainerx ndarray with CUDA
# for the following reason:
# (1) mpi4py.send pickles the object
# (2) chainerx.ndarray preserves CUDA
# device internally when pickled
# (3) An error will occur when an ndarray is unpickled in another
# process
#
if None is obj:
return False
# check collections of list, tuple and set
elif type(obj) in [list, tuple, set]:
for item in obj:
xp = chainer.backend.get_array_module(item)
# DO NOT use device.backend.name as
# 'ChainerxDevice' object has no attribute 'backend'
if xp == chainerx and item.device.name.startswith('cuda'):
return True
# check dict
elif type(obj) is dict:
for key, value in obj.items():
xp = chainer.backend.get_array_module(key)
if xp == chainerx and key.device.name.startswith('cuda'):
return True
xp = chainer.backend.get_array_module(value)
if xp == chainerx and value.device.name.startswith('cuda'):
return True
else:
xp = chainer.backend.get_array_module(obj)
if xp == chainerx and obj.device.name.startswith('cuda'):
return True
return False
# Objects
def send_obj(self, obj, dest, tag=0):
if self._check_obj_type_for_chainerx(obj):
raise ValueError(
                'calling send_obj on chainerx '
                'with cuda is not supported')
self.mpi_comm.send(obj, dest=dest, tag=tag)
def recv_obj(self, source, status=None, tag=mpi4py.MPI.ANY_TAG):
return self.mpi_comm.recv(source=source, status=status, tag=tag)
def bcast_obj(self, obj, max_buf_len=256 * 1024 * 1024, root=0):
if self._check_obj_type_for_chainerx(obj):
raise ValueError(
                'calling bcast_obj on chainerx '
                'with cuda is not supported')
return chunked_bcast_obj(obj, self.mpi_comm,
max_buf_len=max_buf_len,
root=root)
def gather_obj(self, obj, root=0):
if self._check_obj_type_for_chainerx(obj):
raise ValueError(
                'calling gather_obj on chainerx '
                'with cuda is not supported')
return self.mpi_comm.gather(obj, root=root)
def allreduce_obj(self, obj):
# Summation by default
if self._check_obj_type_for_chainerx(obj):
raise ValueError(
                'calling allreduce_obj on chainerx '
                'with cuda is not supported')
return self.mpi_comm.allreduce(obj)
def bcast_data(self, model):
for _, param in sorted(model.namedparams()):
if param.data is not None:
data = param.data
is_float16 = param.data.dtype == numpy.float16
if is_float16:
data = data.astype(numpy.float32)
buf = _memory_utility.array_to_buffer_object(data)
self.mpi_comm.Bcast(buf)
if is_float16:
# update to array as updating to .data directly
# is not supported in ChainerX
param.array[...] = data.astype(numpy.float16)
# Private methods
def _init_ranks(self):
my_ranks = _communication_utility.init_ranks(self.mpi_comm)
assert my_ranks[0] == self.mpi_comm.rank
self._intra_rank = my_ranks[1]
self._intra_size = my_ranks[2]
self._inter_rank = my_ranks[3]
self._inter_size = my_ranks[4]
def _check_ready_to_allreduce(self, array_a, array_b):
my_shapes = ((None if array_a is None else array_a.shape,
None if array_a is None else array_a.dtype),
array_b.shape,
array_b.dtype)
all_shapes = self.gather_obj((self.rank, my_shapes))
if self.rank == 0:
for rank, shapes in all_shapes:
if my_shapes != shapes:
raise ValueError('Shape does not match: {}'
' at rank 0 while {} at rank {}'
.format(my_shapes, shapes, rank))
def _ensure_all_finite(self, array):
xp = chainer.backend.get_array_module(array)
if not xp.isfinite(array).all():
raise ValueError('Parameters diverged after allreduce.')
def _multi_node_mean(self, sendbuf, recvbuf):
"""Compute mean of each element on each processes.
The function compute mean of each element in ``sendbuf`` on each
processes. The result is stored in ``recvbuf``.
If ``sendbuf`` is ``None``, the function compute mean of each element
in ``recvbuf`` on each processes and replaces ``recvbuf` with the
computed mean.
Args:
sendbuf (numpy/cupy array): Input arrays.
recvbuf (numpy/cupy array): Output arrays.
"""
if chainer.is_debug():
self._check_ready_to_allreduce(sendbuf, recvbuf)
is_float16 = recvbuf.dtype == numpy.float16
if sendbuf is None:
buffer_a = mpi4py.MPI.IN_PLACE
elif is_float16:
assert sendbuf.dtype == recvbuf.dtype
buffer_a = _memory_utility.array_to_buffer_object(
sendbuf.astype(numpy.float32))
else:
buffer_a = _memory_utility.array_to_buffer_object(sendbuf)
if is_float16:
array_b32 = recvbuf.astype(numpy.float32)
else:
array_b32 = recvbuf
buffer_b = _memory_utility.array_to_buffer_object(array_b32)
self.mpi_comm.Allreduce(buffer_a, buffer_b)
if is_float16:
recvbuf[...] = array_b32.astype(numpy.float16)
recvbuf *= 1.0 / self.mpi_comm.size
if chainer.is_debug():
self._ensure_all_finite(recvbuf)
def _pack_params_to_buffer(self, params, attr_name, buffer,
allreduce_grad_dtype, zero_fill, stream=None):
if self.batched_copy:
params_data = _memory_utility.ParamsData(params,
attr_name, zero_fill)
_memory_utility._batched_pack_params(
params_data, buffer,
allreduce_grad_dtype, stream=stream)
self.params_data = params_data
else:
_memory_utility.pack_params(
params, attr_name,
buffer,
transfer_dtype=allreduce_grad_dtype,
zero_fill=zero_fill,
stream=stream)
def _unpack_params_from_buffer(self, params, attr_name, buffer,
allreduce_grad_dtype,
zero_fill, stream=None):
if self.batched_copy:
if self.params_data is not None:
params_data = self.params_data
self.params_data = None
else:
params_data = _memory_utility.ParamsData(
params, attr_name, zero_fill)
_memory_utility._batched_unpack_params(
params_data, buffer,
allreduce_grad_dtype, stream=stream)
return
else:
_memory_utility.unpack_params(
params, attr_name, buffer,
allreduce_grad_dtype, zero_fill, stream)
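# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal CPU-only sketch of the collective primitives defined above, using
# the concrete 'naive' communicator (a subclass of this class).  Assumes
# mpi4py is installed.  Run with e.g.
#   mpiexec -n 2 python mpi_communicator_base.py
if __name__ == '__main__':
    import chainermn
    comm = chainermn.create_communicator('naive')
    x = numpy.arange(4, dtype=numpy.float32) + comm.rank
    ys = comm.allgather(x)    # tuple with one array per process
    s = comm.allreduce(x)     # element-wise sum over all processes
    obj = comm.bcast_obj({'epoch': 0} if comm.rank == 0 else None)
    print(comm.rank, len(ys), float(s.sum()), obj)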
| 30,778 | 36.719363 | 88 | py |
| chainer | chainer-master/chainermn/communicators/_memory_utility.py |
import ctypes
import mpi4py.MPI
import numpy as np
from chainermn.communicators import _communication_utility
import chainer.backends
import chainerx as chx
try:
import cupy as cp
_cupy_avail = True
except Exception:
cp = None
_cupy_avail = False
def _get_memory_pointer_from_chainerx(array):
# Currently, ChainerMN requires CuPy to support ChainerX.
# This is because ChainerX's backend does not provide a raw
# memory pointer class.
return cp.cuda.MemoryPointer(
cp.cuda.UnownedMemory(
array.data_ptr + array.offset,
array.data_size,
array,
array.device.index),
0)
class ParamsData(object):
def __init__(self, params, attr_name, zero_fill):
n_params = len(params)
params_dptr = np.empty(n_params, dtype=np.int64)
params_dtype = np.empty(n_params, dtype=np.int32)
params_size_csum = np.empty(n_params+1, dtype=np.int32)
params_size_csum[0] = 0
for i, param in enumerate(params):
v = getattr(param, attr_name)
if attr_name == 'grad' and v is None and zero_fill:
v = param.xp.zeros_like(param.data)
setattr(param, attr_name, v)
xp = chainer.backend.get_array_module(v)
if xp == cp:
v_data = v.data
elif xp == chx:
v_data = _get_memory_pointer_from_chainerx(v)
else:
raise ValueError(
'{} is from an unsupported array module'.format(type(v)))
params_dptr[i] = v_data.ptr
if v.dtype not in [np.float16, np.float32, np.float64]:
raise ValueError('dtype must be float16, float32 or float64.')
params_dtype[i] = _communication_utility._get_nccl_type_id(v.dtype)
params_size_csum[i+1] = params_size_csum[i] + v.size
self.n_params = n_params
self.n_elems = params_size_csum[n_params]
self.size_csum = chainer.cuda.cupy.asarray(params_size_csum)
self.dtype = chainer.cuda.cupy.asarray(params_dtype)
self.dptr = chainer.cuda.cupy.asarray(params_dptr)
class HostPinnedMemory(object):
def __init__(self):
if not _cupy_avail:
raise RuntimeError('HostPinnedMemory cannot be used: ' +
'Cupy is not available.')
self.size = 0
self.memory = None
def assign(self, size):
if size > self.size:
self.size = size
self.memory = cp.cuda.alloc_pinned_memory(size)
def ptr(self, offset=0):
return ctypes.c_void_p(self.memory.ptr + offset)
def buffer(self, size):
return ctypes.cast(
self.memory.ptr,
ctypes.POINTER(ctypes.c_ubyte * size)
).contents
def array(self, count, offset=0, dtype=np.float32):
if dtype is None:
raise TypeError('dtype must be an instance of numpy.dtype class')
return np.frombuffer(
self.memory, count=count, offset=offset, dtype=dtype)
class DeviceMemory(object):
def __init__(self):
if not _cupy_avail:
raise RuntimeError('DeviceMemory cannot be used: ' +
'Cupy is not available.')
self.size = 0
self.memory = None
def assign(self, size):
if size > self.size:
self.size = size
self.memory = cp.cuda.alloc(size)
def from_device(self, src, size, offset=0, stream=None):
dst = self.memory + offset
xp = chainer.backend.get_array_module(src)
if xp == cp:
src_data = src.data
elif xp == chx:
src_data = _get_memory_pointer_from_chainerx(src)
else:
raise ValueError(
'{} is from an unsupported array module'.format(type(src)))
if stream is None:
dst.copy_from_device(src_data, size)
else:
dst.copy_from_device_async(src_data, size, stream)
def to_device(self, dst, size, offset=0, stream=None):
src = self.memory + offset
xp = chainer.backend.get_array_module(dst)
if xp == cp:
dst_data = dst.data
elif xp == chx:
dst_data = _get_memory_pointer_from_chainerx(dst)
else:
raise ValueError(
'{} is from an unsupported array module'.format(type(dst)))
if stream is None:
dst_data.copy_from_device(src, size)
else:
dst_data.copy_from_device_async(src, size, stream)
def ptr(self):
return self.memory.ptr
def buffer(self, size):
return ctypes.cast(
self.memory.ptr,
ctypes.POINTER(ctypes.c_ubyte * size)
).contents
def array(self, shape, offset=0, dtype=np.float32):
if dtype is None:
raise TypeError('dtype must be an instance of numpy.dtype class')
return cp.ndarray(shape, memptr=self.memory + offset, dtype=dtype)
def extract_params_set_data(model):
return [param for _, param in sorted(model.namedparams())
if param.data is not None]
def extract_params_set_grad(model, zero_fill):
if zero_fill:
return [param for _, param in sorted(model.namedparams())
if param.data is not None]
else:
return [param for _, param in sorted(model.namedparams())
if param.data is not None and param.grad is not None]
def count_grad_elements(params, zero_fill):
if zero_fill:
return sum(param.data.size for param in params)
else:
return sum(param.grad.size for param in params)
def pack_params(params, attr_name, buffer,
transfer_dtype, zero_fill, stream=None):
if len(params) == 0:
return
# NOTE: dtypes of params might be mixed, in particular f16 & f32.
offset = 0
for param in params:
v = getattr(param, attr_name)
if attr_name == 'grad' and v is None and zero_fill:
v = param.xp.zeros_like(param.data)
size = v.size * np.dtype(transfer_dtype).itemsize
if v.dtype != transfer_dtype:
tmp = v.astype(transfer_dtype)
buffer.from_device(tmp, size, offset, stream)
else:
buffer.from_device(v, size, offset, stream)
offset += size
def unpack_params(params, attr_name, buffer,
transfer_dtype, zero_fill, stream=None):
"""Pack parameters into a single CuPy array for efficient communication."""
if len(params) == 0:
return
xp = chainer.backend.get_array_module(getattr(params[0], attr_name))
offset = 0
for param in params:
v = getattr(param, attr_name)
if attr_name == 'grad' and v is None and zero_fill:
v = param.xp.empty_like(param.data)
setattr(param, attr_name, v)
size = v.size * np.dtype(transfer_dtype).itemsize
grad_dtype = v.dtype
if grad_dtype != transfer_dtype:
v = xp.array(v, copy=False, dtype=transfer_dtype)
buffer.to_device(v, size, offset, stream)
offset += size
if grad_dtype != transfer_dtype:
# avoid using setattr as ChainerX array cannot be directly updated
getattr(param, attr_name)[...] = v.astype(grad_dtype)
def array_to_buffer_object(array, mpi_dtype=mpi4py.MPI.FLOAT):
xp = chainer.backend.get_array_module(array)
if xp is np:
return get_device_memory_pointer(array)
else:
return (get_device_memory_pointer(array), mpi_dtype)
def get_device_memory_pointer(array):
xp = chainer.backend.get_array_module(array)
array = xp.ascontiguousarray(array)
if xp is np:
return array
elif xp is cp:
return ctypes.cast(
array.data.ptr,
ctypes.POINTER(ctypes.c_ubyte * array.nbytes)
).contents
elif xp is chx:
backend_name = array.device.backend.name
if backend_name not in ['native', 'cuda']:
raise ValueError(
'{} is an unsupported backend'.format(backend_name))
return ctypes.cast(
array.data_ptr,
ctypes.POINTER(ctypes.c_ubyte * array.nbytes)
).contents
else:
raise ValueError(
'{} is from an unsupported array module'.format(type(array)))
def _batched_pack_params(params_data, buffer, dtype, stream=None):
n_params = params_data.n_params
n_elems = params_data.n_elems
params_dptr = params_data.dptr
params_dtype = params_data.dtype
params_size_csum = params_data.size_csum
buf_dtype = _communication_utility._get_nccl_type_id(dtype)
n_threads = 128
n_blocks = (n_elems + n_threads - 1) // n_threads
if stream is None:
stream = cp.cuda.get_current_stream()
with stream:
_cupy_batched_pack_params()(
(n_blocks, ), (n_threads, ),
(buffer.memory.ptr, buf_dtype, n_elems,
params_dptr, params_dtype, params_size_csum, n_params))
def _batched_unpack_params(params_data, buffer, dtype, stream=None):
n_params = params_data.n_params
n_elems = params_data.n_elems
params_dptr = params_data.dptr
params_dtype = params_data.dtype
params_size_csum = params_data.size_csum
buf_dtype = _communication_utility._get_nccl_type_id(dtype)
n_threads = 128
n_blocks = (n_elems + n_threads - 1) // n_threads
if stream is None:
stream = cp.cuda.get_current_stream()
with stream:
_cupy_batched_unpack_params()(
(n_blocks, ), (n_threads, ),
(buffer.memory.ptr, buf_dtype, n_elems,
params_dptr, params_dtype, params_size_csum, n_params))
def _cupy_batched_pack_params():
return chainer.cuda.raw(r'''
#include <cupy/carray.cuh>
#define NCCL_FLOAT16 6
#define NCCL_FLOAT32 7
#define NCCL_FLOAT64 8
extern "C" __global__
void cupy_batched_pack_params(
void *dst0, int dst_dtype, int n_elems,
unsigned long *params_dptr, int *params_dtype,
int *params_size_csum, int n_params) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= n_elems) return;
int j_min = 0;
int j_max = n_params - 1;
int j;
while (1) {
j = (j_min + j_max) / 2;
if (tid < params_size_csum[j]) {
j_max = j - 1;
continue;
}
if (tid >= params_size_csum[j+1]){
j_min = j + 1;
continue;
}
break;
}
assert(tid >= params_size_csum[j]);
assert(tid < params_size_csum[j+1]);
int src_dtype = params_dtype[j];
int src_idx = tid - params_size_csum[j];
if (dst_dtype == NCCL_FLOAT16) {
half* dst = (half*) dst0;
if (src_dtype == NCCL_FLOAT16) {
dst[tid] = (half) (((half*) (params_dptr[j]))[src_idx]);
}
else if (src_dtype == NCCL_FLOAT32) {
dst[tid] = (half) (((float*) (params_dptr[j]))[src_idx]);
}
else if (src_dtype == NCCL_FLOAT64) {
dst[tid] = (half) (((double*) (params_dptr[j]))[src_idx]);
}
}
else if (dst_dtype == NCCL_FLOAT32) {
float* dst = (float*) dst0;
if (src_dtype == NCCL_FLOAT16) {
dst[tid] = (float) (((half*) (params_dptr[j]))[src_idx]);
}
else if (src_dtype == NCCL_FLOAT32) {
dst[tid] = (float) (((float*) (params_dptr[j]))[src_idx]);
}
else if (src_dtype == NCCL_FLOAT64) {
dst[tid] = (float) (((double*) (params_dptr[j]))[src_idx]);
}
}
else if (dst_dtype == NCCL_FLOAT64) {
double* dst = (double*) dst0;
if (src_dtype == NCCL_FLOAT16) {
dst[tid] = (double) (((half*) (params_dptr[j]))[src_idx]);
}
else if (src_dtype == NCCL_FLOAT32) {
dst[tid] = (double) (((float*) (params_dptr[j]))[src_idx]);
}
else if (src_dtype == NCCL_FLOAT64) {
dst[tid] = (double) (((double*) (params_dptr[j]))[src_idx]);
}
}
}
''', 'cupy_batched_pack_params')
def _cupy_batched_unpack_params():
return chainer.cuda.raw(r'''
#include <cupy/carray.cuh>
#define NCCL_FLOAT16 6
#define NCCL_FLOAT32 7
#define NCCL_FLOAT64 8
extern "C" __global__
void cupy_batched_unpack_params(
void *src0, int src_dtype, int n_elems,
unsigned long *params_dptr, int *params_dtype,
int *params_size_csum, int n_params) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= n_elems) return;
int j_min = 0;
int j_max = n_params - 1;
int j;
while (1) {
j = (j_min + j_max) / 2;
if (tid < params_size_csum[j]) {
j_max = j - 1;
continue;
}
if (tid >= params_size_csum[j+1]){
j_min = j + 1;
continue;
}
break;
}
assert(tid >= params_size_csum[j]);
assert(tid < params_size_csum[j+1]);
int dst_dtype = params_dtype[j];
int dst_idx = tid - params_size_csum[j];
if (src_dtype == NCCL_FLOAT16) {
half* src = (half*) src0;
if (dst_dtype == NCCL_FLOAT16) {
((half*) (params_dptr[j]))[dst_idx] = (half) src[tid];
}
else if (dst_dtype == NCCL_FLOAT32) {
((float*) (params_dptr[j]))[dst_idx] = (float) src[tid];
}
else if (dst_dtype == NCCL_FLOAT64) {
((double*) (params_dptr[j]))[dst_idx] = (double) src[tid];
}
}
else if (src_dtype == NCCL_FLOAT32) {
float* src = (float*) src0;
if (dst_dtype == NCCL_FLOAT16) {
((half*) (params_dptr[j]))[dst_idx] = (half) src[tid];
}
else if (dst_dtype == NCCL_FLOAT32) {
((float*) (params_dptr[j]))[dst_idx] = (float) src[tid];
}
else if (dst_dtype == NCCL_FLOAT64) {
((double*) (params_dptr[j]))[dst_idx] = (double) src[tid];
}
}
else if (src_dtype == NCCL_FLOAT64) {
double* src = (double*) src0;
if (dst_dtype == NCCL_FLOAT16) {
((half*) (params_dptr[j]))[dst_idx] = (half) src[tid];
}
else if (dst_dtype == NCCL_FLOAT32) {
((float*) (params_dptr[j]))[dst_idx] = (float) src[tid];
}
else if (dst_dtype == NCCL_FLOAT64) {
((double*) (params_dptr[j]))[dst_idx] = (double) src[tid];
}
}
}''', 'cupy_batched_unpack_params')
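# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal sketch, assuming CUDA/CuPy are available, of packing the
# gradients of a small model into one DeviceMemory buffer and unpacking them
# back.  The model below is illustrative only.
if __name__ == '__main__':
    import chainer.links as L
    model = L.Linear(3, 2)
    model.to_gpu()
    model.cleargrads()
    params = extract_params_set_grad(model, zero_fill=True)
    n_elems = count_grad_elements(params, zero_fill=True)
    buf = DeviceMemory()
    buf.assign(n_elems * np.dtype(np.float32).itemsize)
    # None gradients are zero-filled, cast to float32 and copied into `buf`.
    pack_params(params, 'grad', buf, np.float32, zero_fill=True)
    unpack_params(params, 'grad', buf, np.float32, zero_fill=True)
    print(model.W.grad.shape, model.b.grad.shape)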
| 14,945 | 33.75814 | 79 | py |
| chainer | chainer-master/chainermn/communicators/communicator_base.py |
from abc import ABCMeta
from abc import abstractmethod
import contextlib
import six
import warnings
class CommunicatorBase(six.with_metaclass(ABCMeta)):
'''Interface definition of all communicators.
    All communicators that have a compatible set of methods with this
    class are supposed to work in ChainerMN's parallel computation
implementation. The methods are named after MPI functions, such
as ``bcast()`` came from ``MPI_Bcast()``.
    There are two types of methods: those that handle arbitrary Python
    objects have an ``_obj`` suffix, while those without any suffix
    handle ndarrays and arrays filled with scalar values. So
    the set of methods would be ::
        [send, recv, bcast, gather, allreduce] * [ '_obj', '']
    (with the exceptions of ``alltoall``, ``multi_node_mean_grad``, ``split``
and ``bcast_data`` so far). Also methods are supposed to be
written in this order. All those methods must be implemented in
its implementation class, or otherwise it cannot be instantiated
in runtime.
    .. note:: As most implementations of ``_obj``-suffixed methods
        involve Python object pickling and unpickling, there is an
implicit size limit.
TODO(kuenishi): as of now no implementation class actually has
``allreduce`` method.
'''
_configs = {}
def __init__(self):
self._within_config_scope = False
@property
def rank(self):
'''Rank (process id in the cluster) of this process in integer.'''
raise NotImplementedError()
@property
def size(self):
'''Number of processes of the cluster.'''
raise NotImplementedError()
@property
def intra_rank(self):
'''Intra rank (process id in the machine) of this process.'''
raise NotImplementedError()
@property
def intra_size(self):
'''Number of processes in the machine of this process.'''
raise NotImplementedError()
@property
def inter_rank(self):
'''The rank of this node in the cluster.'''
raise NotImplementedError()
@property
def inter_size(self):
        '''Number of nodes that participate in the cluster.'''
raise NotImplementedError()
def set_config(self, name, **kwargs):
        '''Set configuration(s) on/off
The usage of configurations depends on each communicator. See
:meth:`~chainermn.create_communicator` for available
configurations.
Args:
name (str):
Name of configuration to set.
value:
Give arbitrary object to set.
kwargs:
Arbitrary arguments depending on each configuration.
'''
raise ValueError('Unknown config: {}'.format(name))
def get_config(self, name=None):
'''Get configuration value(s)
Args:
name (str):
Name of the configuration to get. If it is ``None``,
all config names and values are returned.
Returns:
Actual value of the configuration if it is on. ``None`` if it
is off. If ``None`` is given as ``name``, ``None`` or
dictionary of names and configuration values is returned.
'''
if name is not None:
return self._configs[name]
return self._configs
@abstractmethod
def split(self, color, key):
"""A function anologous to ``MPI_Comm_Split`` .
This method splits the inter MPI commnicator and return a wrapped
ChainerMN communicator.
Args:
color (int):
Index of new group. The process with the same color will be
assigned to the same group.
key (int):
Control of rank assignment. The process will be assigned
a rank in the new group ordered by the value of key.
                If you do not care about the rank, you can simply specify
the original rank.
Returns:
CommunicatorBase
"""
raise NotImplementedError()
@abstractmethod
def alltoall(self, xs):
'''All-to-all implementation for ndarray
Args:
xs (tuple of numpy/cupy array)
Returns:
ys (tuple of numpy/cupy array):
Received arrays. The length of tuple equals to
the communicator size.
'''
raise NotImplementedError()
# on ndarrays and such
@abstractmethod
def send(self, data, dest, tag):
'''Sends an ndarray to destination
Receiver must invoke ``recv()`` to wait for the message.
Args:
data: data to be sent (tuple, list or raw numpy/cupy array)
dest (int): Rank of the destination process
tag (int): The tag to identify the message
'''
raise NotImplementedError()
@abstractmethod
def recv(self, source, tag):
'''Receives an ndarray from source.
To receive the message, sender must send the data.
Args:
source (int): Rank of the source process
tag (int): The tag to specifically receive the message
Returns:
The data sent from source process
'''
raise NotImplementedError()
@abstractmethod
def bcast(self, data, max_buf_len=None, root=0):
'''Broadcasts an ndarray from root process to all processes
Args:
data (numpy/cupy array): for root process, the data to broadcast.
For non-root processes, this argument is ignored.
max_buf_len (int): Length of send buffer.
root (int): the process who has the data to broadcast.
Returns:
ys (numpy/cupy array) : The data sent from root process
'''
raise NotImplementedError()
@abstractmethod
def gather(self, data, root=0):
'''Gathers an ndarray from all processes to root process
Args:
            data (ndarray, or scalar): the data to send to the root process.
                The root process also contributes its own data.
            root (int): rank of the process who receives the data.
        Returns:
            For the root process, a tuple of ndarrays gathered from all
            processes. For non-root processes, ``None``.
'''
raise NotImplementedError()
@abstractmethod
def allgather(self, x):
"""A primitive of inter-process all-gather communication.
This method tries to invoke all-gather communication within the
communicator. All processes in the communicator are expected to
invoke ``allgather()``. This method relies on mpi4py fast communication
optimized for numpy arrays, as well as ``send()`` and ``recv()``.
Note that this method can only handle the same shapes of data
over all processes, and cannot handle tuple data.
Args:
x (numpy/cupy array): Array to be gathered.
Returns:
ys (tuple of numpy/cupy array): Received arrays.
"""
raise NotImplementedError()
@abstractmethod
def allreduce(self, data):
'''Allreduce operation among processes
Processes one of several aggregation operations using all data from
all processes and returns the result of the aggregation to all
processes.
TODO(kuenishi): add ``op`` argument once we find a use case
for operations other than 'SUM'.
Args:
data (ndarray): the data to aggregate among all nodes.
Returns:
Sum of all data from all processes.
'''
raise NotImplementedError()
@abstractmethod
def scatter(self, xs, root=0):
"""A primitive of inter-process scatter communication.
This method tries to invoke scatter communication within the
communicator. All processes in the communicator are expected to
invoke ``scatter()``.
Args:
xs (tuple of numpy/cupy array): Arrays to be scattered.
root (int): Rank of root process.
Returns:
ys (numpy/cupy array): Received arrays.
"""
raise NotImplementedError()
def finalize(self):
"""Finalizes and cleans up internal resource.
The communicator SHALL NOT be used after calling this ``finalize()``.
The behaviour is undefined when calling ``finalize`` on the same
communicator multiple times.
"""
pass
# on objects
@abstractmethod
def send_obj(self, obj, dest, tag):
'''Sends an arbitrary Python object to destination with a tag.
Args:
obj: Arbitrary object to send to receiver.
dest (int): Rank number of receiver process (destination).
tag: tag to identify the message.
'''
raise NotImplementedError()
@abstractmethod
def recv_obj(self, source, tag):
'''Receives an arbitrary Python object from source process with a tag.
Args:
source (int): Rank number of sender process, to selectively receive
the object.
tag: tag to identify the message.
Returns:
an object sent from the source by ``send_obj``.
'''
raise NotImplementedError()
@abstractmethod
def bcast_obj(self, obj, max_buf_len=None, root=0):
'''Broadcasts an arbitrary object from root to all non-root processes.
Args:
obj: arbitrary object to broadcast to all other non-root processes.
Will be ignored at all non-root processes.
max_buf_len (int): max length of the send buffer
root (int): rank of the root processes who sends an object
Returns:
an object sent from the root process.
'''
raise NotImplementedError()
@abstractmethod
def gather_obj(self, obj, root=0):
'''Gathers arbitrary objects from all non-root processes to the root.
Args:
            obj: arbitrary object to send to root process. Root process will
receive this argument included in returned list.
root (int): rank of the root node who receives all objects.
Returns:
A list of objects sent from all processes.
TODO(kuenishi): make sure the ordering of objects in the returned list.
'''
raise NotImplementedError()
@abstractmethod
def allreduce_obj(self, obj):
'''Apply a reduce operation to all objects and spread the result.
        For example, for integers and summation, equivalent local code is::
>>> from functools import reduce
>>> reduce(lambda x, y: x + y, [1, 2, 3, 4, 5])
15
The only operation currently supported is summation.
TODO(kuenishi): support other operations such as 'MAX', 'MIN'
and 'PROD' with ``op`` argument once we need any of them.
Args:
obj: An arbitrary object to apply reduce operation. Must have
corresponding operation method e.g. ``__plus__()``.
Returns:
The result of the operation applied to all objects.
'''
raise NotImplementedError()
# Special communication methods on grads and data of models
@abstractmethod
def bcast_data(self, model):
'''Broadcast Chainer model parameter data'''
raise NotImplementedError()
def broadcast_data(self, model):
'''Broadcast Chainer model parameter data
        Left for backward compatibility, but will be deprecated in a
        future version. Use the ``bcast_data()`` method instead.
'''
self.bcast_data(model)
@abstractmethod
def multi_node_mean_grad(self, model, zero_fill=False):
        '''Compute the mean of Chainer model gradients across processes.
        Args:
            model (~chainer.Link): Link object.
            zero_fill: A knob to control whether to fill the gradients of
                initialized but unused Links (which are None internally) with
                zero-valued arrays. After backward computation a gradient may
                be an array or None, but every gradient must be an array to
                take part in the all-reduce. Gradients of uninitialized Links
                are always skipped. If it is False, gradients of unused Links
                are just skipped.
'''
raise NotImplementedError()
def allreduce_grad(self, model, zero_fill=False):
        '''Compute the mean of Chainer model gradients across processes.
        .. deprecated:: v7.0.0
            This API is deprecated. Please use
            :func:`~chainermn.CommunicatorBase.multi_node_mean_grad` instead.
        Args:
            model (~chainer.Link): Link object.
            zero_fill: A knob to control whether to fill the gradients of
                initialized but unused Links (which are None internally) with
                zero-valued arrays. After backward computation a gradient may
                be an array or None, but every gradient must be an array to
                take part in the all-reduce. Gradients of uninitialized Links
                are always skipped. If it is False, gradients of unused Links
                are just skipped.
'''
warnings.warn('allreduce_grad() is deprecated.',
DeprecationWarning)
self.multi_node_mean_grad(model, zero_fill)
@property
def within_config_scope(self) -> bool:
"""True if the current code is inside of an initialization scope.
See :meth:`init_scope` for the details of the initialization scope.
"""
return getattr(self, '_within_config_scope', False)
@contextlib.contextmanager
def config_scope(self):
"""Creates an configuration scope.
"""
old_flag = self.within_config_scope
self._within_config_scope = True
try:
yield
finally:
self._within_config_scope = old_flag
def __setattr__(self, name, value):
if self.within_config_scope:
self._configs[name] = value
super(CommunicatorBase, self).__setattr__(name, value)
| 14,214 | 31.160633 | 79 | py |
| chainer | chainer-master/chainermn/communicators/__init__.py |
import warnings
from chainer.utils import argument
from chainermn.communicators.communicator_base import CommunicatorBase # NOQA
def create_communicator(
communicator_name='pure_nccl', mpi_comm=None, **kwargs):
"""Create a ChainerMN communicator.
Different communicators provide different approaches of communication, so
    they have different performance characteristics. The default communicator
    ``pure_nccl`` is expected to generally perform well on a variety of
    environments, so one need not change communicators in most cases.
However, you may need to choose other communicators depending on
your computing platform and the availability of NCCL library.
The following communicators are available.
+---------------+---+---+--------+--------------------------------------+
|Name |CPU|GPU|NCCL |Recommended Use Cases |
+===============+===+===+========+======================================+
|pure_nccl | |OK |Required|``pure_nccl`` is recommended when |
| | | |(>= v2) |NCCL2 is available in the environment.|
+---------------+---+---+--------+--------------------------------------+
|flat | |OK | |N/A |
+---------------+---+---+--------+--------------------------------------+
|naive |OK |OK | |Testing on CPU mode |
+---------------+---+---+--------+--------------------------------------+
pure_nccl communicator supports multiple data types, FP32 and FP16,
in gradient exchange. The communication data type is determined based on
`chainer.global_config.dtype` and `allreduce_grad_dtype`.
When `allreduce_grad_dtype` is the default value `None`,
FP32 is used when `chainer.global_config.dtype` is `numpy.float32` and
FP16 otherwise.
`allreduce_grad_dtype` parameter,
which is either `numpy.float16` or `numpy.float32`,
overwrites the `chainer.global_config.dtype`.
    The table below summarizes the data type selection in gradient exchange.
+---------------------+--------------------------------------------+
| | allreduce_grad_dtype |
+---------------------+---------+------------------+---------------+
| global_config.dtype | None | numpy.float16 | numpy.float32 |
+=====================+=========+==================+===============+
| chainer.mixed16 | FP16 | FP16 | FP32 |
+---------------------+---------+------------------+---------------+
| numpy.float16 | FP16 | FP16 | FP32 |
+---------------------+---------+------------------+---------------+
| numpy.float32 | FP32 | FP16 | FP32 |
+---------------------+---------+------------------+---------------+
Other communicators, namely ``flat`` and ``naive``, support only
float32 communication, no matter what the model is. This is due to
MPI's limited support of float16.
Args:
communicator_name: The name of communicator (``naive``, ``flat``,
or ``pure_nccl``)
mpi_comm: MPI4py communicator
allreduce_grad_dtype: Data type of gradient used in All-Reduce.
If ``None``, the dtype of a model is used.
Returns:
ChainerMN communicator that implements methods defined in
:class:`chainermn.CommunicatorBase`
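    A minimal usage sketch, assuming an MPI environment with mpi4py installed
    (typically launched through ``mpiexec``):
    >>> import chainermn
    >>> comm = chainermn.create_communicator('pure_nccl')
    >>> device_id = comm.intra_rank  # GPU to use on this process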
"""
if mpi_comm is None:
try:
import mpi4py.MPI
except ImportError as e:
raise ImportError(str(e) + ': '
'ChainerMN requires mpi4py for '
'distributed training. '
'Please read the Chainer official document '
'and setup MPI and mpi4py.')
mpi_comm = mpi4py.MPI.COMM_WORLD
    # Check the deprecated option before ``parse_kwargs`` consumes it.
    if 'batched_copy' in kwargs:
        warnings.warn("The option 'batched_copy' is enabled by default; "
                      'it is deprecated and will be removed in a future '
                      'version.', DeprecationWarning)
    allreduce_grad_dtype, batched_copy = argument.parse_kwargs(
        kwargs, ('allreduce_grad_dtype', None), ('batched_copy', True))
    argument.assert_kwargs_empty(kwargs)
if communicator_name != 'pure_nccl' and allreduce_grad_dtype is not None:
raise ValueError(
'allreduce_grad_dtype is only available '
'at \'pure_nccl\' communicator.')
comm = None
if communicator_name == 'naive':
from chainermn.communicators.naive_communicator \
import NaiveCommunicator
comm = NaiveCommunicator(mpi_comm=mpi_comm)
elif communicator_name == 'flat':
from chainermn.communicators.flat_communicator \
import FlatCommunicator
comm = FlatCommunicator(mpi_comm=mpi_comm)
elif communicator_name == 'non_cuda_aware':
from chainermn.communicators.non_cuda_aware_communicator \
import NonCudaAwareCommunicator
comm = NonCudaAwareCommunicator(mpi_comm=mpi_comm)
elif communicator_name == 'pure_nccl':
from chainermn.communicators.pure_nccl_communicator \
import PureNcclCommunicator
comm = PureNcclCommunicator(mpi_comm=mpi_comm)
comm.set_config('allreduce_grad_dtype', allreduce_grad_dtype)
elif communicator_name == 'dummy':
from chainermn.communicators.dummy_communicator \
import DummyCommunicator
comm = DummyCommunicator(mpi_comm=mpi_comm)
else:
raise ValueError(
'Unrecognized communicator: "{}"'.format(communicator_name))
# As all currently supported communicators are all ancestor of
# MpiCommunicator, it is fine calling here for all descendants
comm.set_config('batched_copy', batched_copy)
return comm
| 5,933
| 43.616541
| 78
|
py
|
chainer
|
chainer-master/chainermn/communicators/flat_communicator.py
|
import numpy as np
from chainermn.communicators import _memory_utility
from chainermn.communicators import mpi_communicator_base
class FlatCommunicator(mpi_communicator_base.MpiCommunicatorBase):
def __init__(self, mpi_comm):
super(FlatCommunicator, self).__init__(mpi_comm)
self.gpu_buffer_a = _memory_utility.DeviceMemory()
self.gpu_buffer_b = _memory_utility.DeviceMemory()
def multi_node_mean_grad(self, model, zero_fill=False):
params = _memory_utility.extract_params_set_grad(model, zero_fill)
        itemsize = 4  # gradients are packed and exchanged as float32
n_elems_total = _memory_utility.count_grad_elements(params,
zero_fill)
n_bytes_total = n_elems_total * itemsize
self.gpu_buffer_a.assign(n_bytes_total)
self.gpu_buffer_b.assign(n_bytes_total)
allreduce_grad_dtype = np.float32
self._pack_params_to_buffer(params, 'grad', buffer=self.gpu_buffer_a,
allreduce_grad_dtype=allreduce_grad_dtype,
zero_fill=zero_fill)
self._multi_node_mean(self.gpu_buffer_a.array(n_elems_total),
self.gpu_buffer_b.array(n_elems_total))
self._unpack_params_from_buffer(params, 'grad', self.gpu_buffer_b,
allreduce_grad_dtype, zero_fill)
| 1,399
| 39
| 78
|
py
|
chainer
|
chainer-master/onnx_chainer/export.py
|
from __future__ import print_function
from collections import OrderedDict
import warnings
import chainer
try:
import onnx
from onnx import checker
from onnx import helper
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
from onnx import numpy_helper
from onnx import shape_inference
from onnx_chainer.context import Context
from onnx_chainer.graph import Graph
from onnx_chainer import mapping
from onnx_chainer.onnx_helper import is_support_non_standard_domain
_available = True
except ImportError:
_available = False
MINIMUM_OPSET_VERSION = 7
MAXIMUM_OPSET_VERSION = 11
def _check_available():
if not _available:
raise ImportError(
            'ONNX is not installed in your environment. Exporting your model '
'in ONNX format needs the onnx package.\n\n'
'\t$ pip install \'onnx<1.7.0\'\n\n')
def convert_parameter(parameter, context):
if isinstance(parameter, chainer.Parameter):
array = parameter.array
elif isinstance(parameter, chainer.Variable):
array = parameter.array
elif isinstance(parameter, chainer.get_array_types()):
array = parameter
else:
raise ValueError(
'The type of parameter is unknown. It should be either Parameter '
'or Variable or ndarray, but the type was {}.'.format(
type(parameter)))
array = chainer.cuda.to_cpu(array)
tensor = numpy_helper.from_array(array, context.get_name(parameter))
return tensor
def rename_variable_name(
context, variables, named_vars, new_names, prefix='Input'):
# Update ``named_vars`` keys to ``new_names``
if isinstance(variables, (list, tuple)):
if new_names is None:
new_names = ['{}_{}'.format(prefix, i)
for i in range(len(named_vars))]
if not isinstance(new_names, (list, tuple)) or\
len(variables) != len(new_names):
raise ValueError(
                'Replacing name list does not match the input (or output) '
'variables')
for i, var in enumerate(variables):
del named_vars[context.get_name(var)]
new_name = new_names[i]
named_vars[new_name] = var
context.set_name(var, new_name, pinned=True)
elif isinstance(variables, dict):
if new_names is None:
new_names = {k: '{}_{}'.format(prefix, i)
for i, k in enumerate(variables.keys())}
if not isinstance(new_names, (list, tuple, dict)) or\
len(variables) != len(new_names):
raise ValueError(
                'Replacing name dict does not match the input (or output) '
'variables')
if isinstance(new_names, (list, tuple)):
new_names = {k: v for k, v in zip(variables.keys(), new_names)}
for k, v in variables.items():
if k not in new_names:
raise ValueError(
'Key of replacing name is not found in variables')
del named_vars[context.get_name(v)]
new_name = new_names[k]
named_vars[new_name] = v
context.set_name(v, new_name, pinned=True)
elif isinstance(variables, chainer.Variable):
if not new_names:
new_names = prefix + '_0'
if isinstance(new_names, (list, tuple)):
if len(new_names) != 1:
raise ValueError('Replacing name must be single')
new_name = new_names[0]
elif isinstance(new_names, str):
new_name = new_names
else:
raise ValueError(
'Type {} is not supported for single variable'.format(
                    type(new_names)))
del named_vars[context.get_name(variables)]
named_vars[new_name] = variables
context.set_name(variables, new_name, pinned=True)
def format_customized_shapes(args, shapes):
if isinstance(args, (list, tuple)):
if not isinstance(shapes, list) or len(args) != len(shapes):
raise ValueError('Customized shapes cannot fit for input list')
for i, (arg, shape) in enumerate(zip(args, shapes)):
if len(arg.shape) != len(shape):
raise ValueError(
'Index-{} shape length must be same as input'.format(i))
return shapes
elif isinstance(args, dict):
if not isinstance(shapes, (list, dict)) or\
len(args) != len(shapes):
raise ValueError('Customized shapes cannot fit for input dict')
if isinstance(shapes, list):
shapes = {k: v for k, v in zip(args.keys(), shapes)}
formatted_shapes = []
for k, arg in args.items():
if k not in shapes:
raise ValueError(
'Key "{}" is not found in customized shapes'.format(k))
if len(arg.shape) != len(shapes[k]):
raise ValueError(
'Key "{}" shape length must be same as input'.format(k))
formatted_shapes.append(shapes[k])
return formatted_shapes
else:
assert isinstance(args, (chainer.Variable, chainer.get_array_types()))
if isinstance(shapes, list):
if len(shapes) != 1:
raise ValueError('Customized shape must be single')
elif not isinstance(shapes, tuple):
raise ValueError(
'Type {} is not supported for single input'.format(
type(shapes)))
else:
shapes = [shapes]
if len(args.shape) != len(shapes[0]):
raise ValueError('Shape length must be same as input')
return shapes
class RetainInputHook(chainer.LinkHook):
"""Retain temporary inputs
Function nodes manage inputs variable nodes using weak reference. When
variable is made as temporary value, exporter cannot get the corresponded
variable from the variable node because the reference is collected. To
resolve it, retain all inputs and will use when make computational graph.
To reduce memory size, this hook retains only variables not showed in link
inputs. To enable this feature, links are required to use ``forward``, not
``__call__``.
"""
def __init__(self):
self.link_inputs = set()
self.retain_inputs = []
self.replaced_inputs = []
self.org_apply = chainer.function_node.FunctionNode.apply
def hooked_apply(_self, inputs):
ret = self.org_apply(_self, inputs)
func_inodes = list(_self.inputs)
for i, inode in enumerate(func_inodes):
referenced_var = inode.get_variable_or_none()
if referenced_var is None:
# This variable is created within function node and weakref
# is lost. Make temporary variable and retain it.
temp_var = chainer.as_variable(inputs[i])
func_inodes[i] = temp_var.node
self.retain_inputs.append(temp_var)
else:
if id(referenced_var) not in self.link_inputs:
# This variable is created within link forward, outside
# of function node. To avoid to lose reference out
# of the forward, retain the variable.
self.retain_inputs.append(referenced_var)
self.replaced_inputs.append((_self, _self.inputs))
_self.inputs = tuple(func_inodes)
return ret
self.hooked_apply = hooked_apply
def _extract_inputs(self, args):
# Retain only chainer.Variable (and its collection)
# Other type args are ignored and not checked instance IDs
# If these variable are used in FunctionNode, they will be retained
ret = set()
if isinstance(args, chainer.Variable):
ret.add(id(args))
elif isinstance(args, (list, tuple)):
for arg in args:
ret |= self._extract_inputs(arg)
elif isinstance(args, dict):
for arg in args.values():
ret |= self._extract_inputs(arg)
return ret
def forward_preprocess(self, args):
self.link_inputs |= self._extract_inputs(args.args)
self.link_inputs |= self._extract_inputs(args.kwargs)
def forward_postprocess(self, args):
self.link_inputs.clear()
def __enter__(self):
chainer.function_node.FunctionNode.apply = self.hooked_apply
return super().__enter__()
def __exit__(self, *exc_details):
chainer.function_node.FunctionNode.apply = self.org_apply
for _self, inputs in self.replaced_inputs:
_self.inputs = inputs
super().__exit__(*exc_details)
def export(model, args, filename=None, export_params=True,
graph_name='Graph', save_text=False, opset_version=None,
input_names=None, output_names=None, train=False,
return_named_inout=False, external_converters=None,
external_opset_imports=None, input_shapes=None, no_testcase=False):
"""Export function for chainer.Chain in ONNX format.
This function performs a forward computation of the given
:class:`~chainer.Chain`, ``model``, by passing the given arguments ``args``
directly. It means, the output :class:`~chainer.Variable` object ``y`` to
make the computational graph will be created by:
``y = model(*args)``
    ``external_converters`` and ``external_opset_imports`` are for external
    custom operators. When some ~chainer.FunctionNode is expected to be
    converted to your own customized operator, set a converter function keyed
    by the ~chainer.FunctionNode name.
>>> import onnx
>>> def custom_converter(param):
... return onnx.helper.make_node(
... 'CustomizedRelu', param.input_names, param.output_names,
... domain='chainer'),
>>>
>>> external_converters = {'ReLU': custom_converter}
>>> external_imports = {'chainer': 0}
>>>
>>> model = chainer.Sequential(F.relu) # set the target model
>>> args = chainer.Variable(np.random.rand(1,10)) # set dummy input
>>> onnx_graph = onnx_chainer.export(
... model, args,
... external_converters=external_converters,
... external_opset_imports=external_imports)
Returned model has ``CustomizedRelu`` node.
Args:
model (~chainer.Chain): The model object you want to export in ONNX
            format. It should have a :meth:`__call__` method because the
            second argument ``args`` is directly given to the model as
            ``model(*args)``.
args (list or dict): The arguments which are given to the model
directly.
filename (str or file-like object): The filename used for saving the
resulting ONNX model. If None, nothing is saved to the disk.
export_params (bool): If True, this function exports all the parameters
included in the given model at the same time. If False, the
exported ONNX model doesn't include any parameter values.
graph_name (str): A string to be used for the ``name`` field of the
graph in the exported ONNX model.
save_text (bool): If True, the text format of the output ONNX model is
            also saved with ``.txt`` extension.
opset_version (int): The operator set version of ONNX. If not specified
or ``None`` is given, the latest opset version of the onnx module
            is used. If an integer is given, it will be ensured that all the
            operator versions in the exported ONNX file do not exceed this
            value.
        input_names (str, list or dict): Customize input names of the graph.
            The number of ``input_names`` must be the same as the number of
            ``args``. When a dict is given, its keys must be the same as the
            keys of ``args``.
        output_names (str, list or dict): Customize output names of the graph.
            The number of ``output_names`` must be the same as the number of
            actual outputs from ``model``. When a dict is given, its keys
            must be the same as the keys of the ``model`` output.
train (bool): If True, output computational graph with train mode.
return_named_inout (bool): If set True, return ONNX model with named
inputs, and named outputs.
external_converters (dict): Add-on converter. Convert functions
keyed by ~chainer.FunctionNode name.
external_opset_imports (dict): Import external opset. opset version
number keyed by domain name.
        input_shapes (tuple, list, dict): If set, input shapes of the output
            graph follow the customized shapes. When the input is a collection
            type, set a list or dict. A tuple of tuples is not allowed.
Returns:
~onnx.ModelProto or tuple:
When ``return_named_inout`` is ``False``, return ModelProto as an
ONNX model. Otherwise return the tuple of ModelProto, named inputs
and outputs, both inputs and outputs are list of ~chainer.Variable.
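    A minimal call without custom converters, reusing ``model`` and ``args``
    from the example above; the return value is an ``onnx.ModelProto``:
    >>> onnx_model = onnx_chainer.export(model, args)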
"""
_check_available()
if not no_testcase and filename is not None:
warnings.warn(
'Exporting ONNX without testcases is deprecated. '
'Use export_testcase instead',
DeprecationWarning)
with chainer.using_config('train', train),\
chainer.using_config('in_recomputing', True),\
chainer.using_config('enable_backprop', True):
return _export(
model, args, filename, export_params, graph_name, save_text,
opset_version, input_names, output_names, return_named_inout,
external_converters, external_opset_imports, input_shapes)
def _export(model, args, filename, export_params, graph_name, save_text,
opset_version, input_names, output_names, return_named_inout,
external_converters, external_opset_imports, input_shapes):
if opset_version is None:
opset_version = min(
int(onnx.defs.onnx_opset_version()), MAXIMUM_OPSET_VERSION)
elif opset_version < MINIMUM_OPSET_VERSION or \
opset_version > MAXIMUM_OPSET_VERSION:
warnings.warn(
            'ONNX-Chainer has been tested only with opset_version {} ~ {}. '
'The ONNX file exported with your requested opset_version ({}) '
'may cause some problems because the converters used for the '
'opset_version have not been tested.'.format(
MINIMUM_OPSET_VERSION, MAXIMUM_OPSET_VERSION, opset_version))
if input_shapes is not None:
# if input shapes are invalid, raise exception before forwarding.
input_shapes = format_customized_shapes(args, input_shapes)
with RetainInputHook(), mapping.patch_functions():
# Forward computation
context = Context(model)
network_inputs = OrderedDict()
if isinstance(args, tuple):
args = list(args)
if isinstance(args, list):
for i, arg in enumerate(args):
if isinstance(arg, chainer.get_array_types()):
args[i] = chainer.Variable(arg)
network_inputs[context.get_name(args[i])] = args[i]
outputs = model(*args)
elif isinstance(args, dict):
for key, arg in args.items():
if isinstance(arg, chainer.get_array_types()):
args[key] = chainer.Variable(arg)
network_inputs[context.get_name(args[key])] = args[key]
outputs = model(**args)
elif isinstance(args, chainer.get_array_types()):
args = chainer.Variable(args)
network_inputs[context.get_name(args)] = args
outputs = model(args)
elif isinstance(args, chainer.Variable):
network_inputs[context.get_name(args)] = args
outputs = model(args)
else:
raise ValueError(
'The \'args\' argument should be a list, tuple, dict, '
'numpy array, or Chainer Variable. But a {} object was '
'given.'.format(type(args)))
rename_variable_name(context, args, network_inputs, input_names)
initializers = []
input_tensors = []
param_names = set()
for org_name, param in model.namedparams():
        # `model.namedparams()` has an `include_uninit` flag, but it is not
        # used here in order to output a user warning
if param.array is None:
warnings.warn(
'The parameter \'{}\' is not initialized, skip setting to '
'ONNX graph'.format(org_name))
continue
name = context.get_name(param)
param_names.add(name)
tensor = convert_parameter(param, context)
initializers.append(tensor)
input_tensors.append(helper.make_tensor_value_info(
name, tensor.data_type, tensor.dims))
for i, (name, var) in enumerate(network_inputs.items()):
shape = var.shape if input_shapes is None else input_shapes[i]
input_tensors.append(helper.make_tensor_value_info(
name, NP_TYPE_TO_TENSOR_TYPE[var.dtype], shape))
if external_converters:
chainer.utils.experimental('external_converters')
converters = dict(mapping.converters, **external_converters)
else:
converters = mapping.converters
if isinstance(outputs, (list, tuple)):
flat_outputs = outputs
elif isinstance(outputs, dict):
flat_outputs = list(outputs.values())
elif isinstance(outputs, chainer.Variable):
flat_outputs = [outputs]
else:
raise RuntimeError(
'Unexpected output type from the model: {}'.format(
type(outputs)))
if not all([isinstance(o, chainer.Variable) for o in flat_outputs]):
            raise ValueError('All the \'outputs\' must be Chainer Variables')
network_outputs = OrderedDict(
[(context.get_name(var), var) for var in flat_outputs])
if output_names:
rename_variable_name(
context, outputs, network_outputs, output_names)
o = Graph(context, converters, opset_version,
param_names | set(network_inputs.keys()),
network_outputs)
o.to_onnx_graph()
implicit_input_names = set(context.implicit_inputs.keys())
for name in implicit_input_names:
tensor = convert_parameter(context.implicit_inputs[name], context)
initializers.append(tensor)
input_tensors.append(helper.make_tensor_value_info(
name, tensor.data_type, tensor.dims))
# If additional parameters are created during conversion
for param in context.parameters:
tensor = convert_parameter(param, context)
initializers.append(tensor)
input_tensors.append(helper.make_tensor_value_info(
context.get_name(param), tensor.data_type, tensor.dims))
# Convert output tensors
output_tensors = []
for name, var in network_outputs.items():
output_tensors.append(helper.make_tensor_value_info(
name, NP_TYPE_TO_TENSOR_TYPE[var.dtype], var.shape))
if not export_params:
initializers = []
onnx_graph = helper.make_graph(
o.graph, graph_name, input_tensors, output_tensors,
initializer=initializers)
opset_imports = [helper.make_operatorsetid('', opset_version)]
if external_opset_imports:
chainer.utils.experimental('external_opset_imports')
for domain, version in external_opset_imports.items():
opset_imports.append(helper.make_operatorsetid(domain, version))
model = helper.make_model(
onnx_graph,
producer_name='Chainer',
producer_version=chainer.__version__,
opset_imports=opset_imports
)
model.ir_version = onnx.IR_VERSION
check_onnx_model(model, external_converters, external_opset_imports)
if input_shapes is not None:
for output in model.graph.output:
for d in output.type.tensor_type.shape.dim:
d.Clear()
model = shape_inference.infer_shapes(model)
check_onnx_model(model, external_converters, external_opset_imports)
if filename is not None and isinstance(filename, str):
with open(filename, 'wb') as fp:
fp.write(model.SerializeToString())
if save_text:
with open(filename + '.txt', 'w') as fp:
print(model, file=fp)
elif hasattr(filename, 'write'):
filename.write(model.SerializeToString())
if return_named_inout:
chainer.utils.experimental('return_named_inout')
return model, network_inputs, network_outputs
return model
def check_onnx_model(onnx_model, external_converters, external_opset_imports):
try:
checker.check_model(onnx_model)
except onnx.checker.ValidationError as e:
if external_converters is None:
raise e
else:
            # ONNX version >= 1.5: the default checker skips the schema check
            # when a non-standard domain is set. In ONNX-Chainer, external ops
            # without a domain are also accepted, but a warning is shown.
            # ONNX version < 1.5: the checker does not skip the schema check
            # regardless of whether a domain is set or not. In ONNX-Chainer,
            # errors are ignored when external ops are set.
if is_support_non_standard_domain():
if external_opset_imports:
raise e
else:
warnings.warn(
                        'A ValidationError occurred but is ignored. '
                        'ONNX-Chainer recommends setting '
                        '`external_opset_imports` when using '
                        '`external_converters` on exporting. Please note '
                        'that the ONNX format check is insufficient. Error '
                        'message:\n{}'.format(str(e)), UserWarning)
else:
warnings.warn(
                    'A ValidationError occurred but is ignored because '
                    'the model is exported with `external_converters`. '
                    'Please note that the ONNX format check is insufficient. '
                    'Error message:\n{}'.format(str(e)), UserWarning)
| 22,516
| 42.21881
| 79
|
py
|
chainer
|
chainer-master/onnx_chainer/context.py
|
import chainer
import onnx
from onnx import numpy_helper
from onnx_chainer import onnx_helper
def _tensor_from_array_for_constant(array, name):
tensor = numpy_helper.from_array(array, name=name)
# Avoid `raw_data` for better debuggability. This would be OK
# since constants are usually small.
field_name = onnx.mapping.STORAGE_TENSOR_TYPE_TO_FIELD.get(
tensor.data_type, None)
if field_name is not None:
tensor.ClearField('raw_data')
getattr(tensor, field_name)[:] = array.flatten().tolist()
return tensor
class Context(object):
"""Context of converter
This context shares names during exporting.
Attributes:
        name_list (dict): dict of ``(name, pinned)`` entries, where ``name``
            is exported as the ONNX node name, keyed by instance ID. When the
            target variable is ``chainer.Variable`` or ``chainer.Parameter``,
            the instance ID of the ``ndarray`` held by the variable is also
            put as a key, because some functions like ``F.where`` internally
            unwrap variables.
"""
def __init__(self, model):
self.name_list = dict()
self.parameters = []
self.constants = []
        self.implicit_inputs = dict()  # inputs not connected to outputs
namedlink = {n: l for n, l in model.namedlinks()}
self.param_to_link = {}
for name, param in model.namedparams():
owned_link_name = name[:name.rindex('/')]
if owned_link_name in namedlink:
onnx_owned_link_name = onnx_helper.cleanse_param_name(
owned_link_name)
self.param_to_link[id(param)] = (
onnx_owned_link_name, namedlink[owned_link_name])
onnx_name = onnx_helper.cleanse_param_name(name)
self.set_name(param, onnx_name)
def get_name(self, variable):
str_id = id(variable)
if str_id in self.name_list:
return self.name_list[str_id][0]
else:
new_name = 'v{}'.format(len(self.name_list))
self.set_name(variable, new_name)
return new_name
def set_name(self, variable, name, pinned=False):
"""Set ONNX node name
Arguments:
variable (var): target variable
name (str): name to be exported as ONNX node name
pinned (bool): if ``True``, the name will not be overwritten in
                subsequent processing.
"""
str_id = id(variable)
assert str_id not in self.name_list or not self.name_list[str_id][1]
self.name_list[str_id] = (name, pinned)
if isinstance(variable, (chainer.Variable, chainer.Parameter)):
array_id = id(variable.array)
self.name_list[array_id] = (name, pinned)
def is_pinned(self, variable):
str_id = id(variable)
if str_id not in self.name_list:
return False
return self.name_list[str_id][1]
def add_param(self, array, name, use_original_name=False):
"""Add a parameter array as an ONNX initializer.
Returns:
str: registered name.
"""
if use_original_name:
onnx_name = name
else:
if not (name.startswith('/') or name.startswith('_')):
name = '/' + name
onnx_name = '{}_{}'.format(
onnx_helper.get_func_name(),
onnx_helper.cleanse_param_name(name))
self.set_name(array, onnx_name)
self.parameters.append(array)
return onnx_name
def add_const(self, array, name):
"""Add a constant array as an ONNX Constant node.
Returns:
str: registered name.
"""
assert '/' not in name
onnx_name = '{}_const_{}'.format(onnx_helper.get_func_name(), name)
self.set_name(array, onnx_name)
tensor = _tensor_from_array_for_constant(array, name=onnx_name)
const_node = onnx_helper.make_node(
'Constant', [], [onnx_name], value=tensor)
self.constants.append(const_node)
return onnx_name
def get_link(self, param):
"""Return link with name which has the param.
Arguments:
param(chainer.Parameter): the target param.
Returns:
tuple: name and link. returns ``None`` when not found.
"""
return self.param_to_link.get(id(param), None)
| 4,408
| 33.716535
| 78
|
py
|
chainer
|
chainer-master/onnx_chainer/graph.py
|
import collections
from collections import OrderedDict
import heapq
import chainer
from onnx_chainer.functions.converter import FunctionConverterParams
from onnx_chainer import onnx_helper
class Graph(object):
def __init__(self, context, converters, opset_version,
explicit_input_names, network_outputs):
self.context = context
self.converters = converters
self.graph = []
self.func_name_counts = collections.defaultdict(int)
self.outputs = set() # Output variable names
self.specified_opset_version = opset_version
self.explicit_input_names = explicit_input_names
self.network_outputs = network_outputs
self.function_nodes = self._build_computational_graph(
network_outputs.values())
def _build_computational_graph(self, outputs):
cands = []
function_nodes = OrderedDict()
push_count = [0]
def add_cand(cand):
heapq.heappush(cands, (-cand.rank, push_count[0], cand))
push_count[0] += 1
for o in outputs:
if isinstance(o, chainer.Variable):
o = o.node
add_cand(o)
while cands:
_, _, cand = heapq.heappop(cands)
if not isinstance(cand, chainer.variable.VariableNode):
raise NotImplementedError(
'ONNX-Chainer does not support node type {}'.format(
type(cand)))
creator = cand.creator_node
if creator is None:
continue
assert isinstance(creator, chainer.FunctionNode)
creator_id = id(creator)
if creator_id in function_nodes:
continue
function_nodes[creator_id] = creator
for input_ in creator.inputs:
add_cand(input_)
return reversed(function_nodes.values())
def create_node(
self, func_name, func, input_names, output_names):
converter = self.converters.get(func_name, None)
if converter is None:
raise ValueError('{} is not supported'.format(func_name))
params = FunctionConverterParams(
func, self.specified_opset_version, input_names, output_names,
self.context)
nodes = converter(params)
return list(nodes)
def convert_to_onnx_node(self, function):
if isinstance(function, chainer.function.FunctionAdapter):
function = function.function
func_name = getattr(
function, 'custom_function_node_name', function.__class__.__name__)
base_func_name = '{}_{}'.format(
func_name, self.func_name_counts[func_name])
self.func_name_counts[func_name] += 1
input_names = []
for input_var in function.inputs:
# 'input_var' is a VariableNode,
# so check if it has a Variable/Parameter
var = input_var.get_variable_or_none()
if var is None: # No reference to Variable/Parameter
# Use VariableNode as is
input_name = self.context.get_name(input_var)
else: # It is a parameter inside a Link or network input
input_name = self.context.get_name(var)
if (input_name not in self.explicit_input_names and
input_name not in self.outputs):
# register input variables to check implicit inputs
self.context.implicit_inputs[input_name] = var
input_names.append(input_name)
# This is to get corresponding VariableNode id from the output
# Variable of the network
output_names = []
for i, output_ref in enumerate(function.outputs):
if output_ref() is None:
var = output_ref
else:
var = output_ref().get_variable_or_none()
if var is None:
var = output_ref()
output_name = self.context.get_name(var)
# The context sets unique names on node and param, like "v1".
# To be more understandable, change the names like function name
            # + number like "FunctionName_0"
if not self.context.is_pinned(var):
if len(function.outputs) == 1:
new_name = base_func_name
else:
new_name = '{}_{}'.format(base_func_name, i)
if output_name in self.network_outputs:
del self.network_outputs[output_name]
self.network_outputs[new_name] = var
self.context.set_name(var, new_name)
output_name = new_name
self.outputs.add(output_name)
output_names.append(output_name)
onnx_helper.set_func_name(base_func_name)
nodes = self.create_node(
func_name, function, input_names, output_names)
# Insert constants before computation nodes.
self.graph.extend(self.context.constants)
self.context.constants.clear()
self.graph.extend(nodes)
def to_onnx_graph(self):
for node in self.function_nodes:
self.convert_to_onnx_node(node)
| 5,263
| 37.144928
| 79
|
py
|
chainer
|
chainer-master/onnx_chainer/replace_func.py
|
import inspect
import chainer
class WrappedFunctionNode(chainer.FunctionNode):
"""Wrap the target function and operate as ``FunctionNode``
Arguments:
name (str): name of the function node
func (func): the target function
args (list): args for the function
kwargs (dict): kwargs for the function
arg_vars (list): list of `chainer.Variable`s in `args` and `kwargs`
        attributes (dict): parameters to be set as the node's attributes
"""
def __init__(self, name, func, args, kwargs, arg_vars, attributes=None):
self.custom_function_node_name = name
self.func = func
self.args = args
self.kwargs = kwargs
self.arg_vars = arg_vars
self.internal_results = None
if attributes is not None:
for k, v in attributes.items():
setattr(self, k, v)
def forward(self, xs):
assert len(xs) == len(self.arg_vars)
self.xs = xs
results = self.func(*self.args, **self.kwargs)
self.skeleton, flattened_results = self._flatten_return_value(results)
dummy_results = tuple(_unwrap_var(ret) for ret in flattened_results)
if all([_is_var(ret) for ret in flattened_results]):
self.internal_results = flattened_results
if not chainer.is_arrays_compatible(dummy_results):
raise ValueError(
'returned values from the function wrapped by \'as_funcnode\' '
                'must consist only of arrays, function name: {}'.format(
self.custom_function_node_name))
return dummy_results
def backward(self, target_input_indexes, grad_outputs):
if self.internal_results is None:
raise ValueError(
'the target function does not support backward, propagation '
                'failed')
grad_inputs = chainer.grad(self.internal_results, self.arg_vars,
grad_outputs=grad_outputs)
assert len(self.arg_vars) == len(grad_inputs)
return tuple(grad_input if i in target_input_indexes else None
for i, grad_input in enumerate(grad_inputs))
def _flatten_return_value(self, x):
outputs = []
def skeletonize(r):
if isinstance(r, tuple):
return tuple(skeletonize(e) for e in r)
elif isinstance(r, list):
return [skeletonize(e) for e in r]
elif isinstance(r, dict):
return {k: skeletonize(v) for k, v in r.items()}
else:
index = len(outputs)
outputs.append(r)
return index
skeleton = skeletonize(x)
return skeleton, outputs
def reconstruct_return_value(self, outputs):
def f(skeleton):
if isinstance(skeleton, tuple):
return tuple(f(e) for e in skeleton)
elif isinstance(skeleton, list):
return [f(e) for e in skeleton]
elif isinstance(skeleton, dict):
return {k: f(v) for k, v in skeleton.items()}
else:
return outputs[skeleton]
return f(self.skeleton)
def fake_as_funcnode(alt_func, name, rename_attributes=None,
experimental_warning=True):
"""The target function fakes FunctionNode
    The target function is replaced with the alternative function to connect
    variable nodes by acting as a function node. ``alt_func`` must satisfy
    the following restrictions.
    1. Inputs include one or more ``chainer.Variable`` to trace variables.
    2. Outputs consist of nothing but ``ndarray`` or ``chainer.Variable``.
    Even if ``alt_func`` returns an ``ndarray``, the value is forced to be
    converted to ``chainer.Variable``. A caller of the target function has to
    handle both cases, returning ``ndarray`` and ``chainer.Variable``.
    When ``alt_func`` returns a ``list`` of variables, the wrapped function
    will also return multiple variables as a ``tuple``. However, a ``dict``
    cannot be returned; the wrapped function breaks down the returned values
    into a ``tuple`` of values, and the keys are ignored.
Arguments of ``alt_func`` except for ``chainer.Variable`` are set as
    function attributes. Attribute names are set to ``argN`` (N is the index
    number) or the keyword name by default.
Example:
>>> def func(x, a, b, c=1, d=2): pass
>>> # x is variable
>>> func = onnx_chainer.replace_func.fake_as_funcnode(
... func, 'CustomNode',
... rename_attributes=[(1, 'value'), ('c', 'y')])
Then ``func`` will be operated as a function node named "CustomNode", and
``'value'``, ``'b'``, ``'y'``, ``'d'`` are set as function's attributes.
    See tests/test_replace_func.py for more details.
Args:
        alt_func (func): the function actually called. There are some
            constraints; see the above documentation.
        name (str): function name. This name is used to determine which ONNX
            operator is assigned.
rename_attributes (list or tuple): rename attribute name, set list
of ``tuple(index_of_args, new_name)`` or
``tuple(kwargs_name, new_name)``
        experimental_warning: this function is an experimental utility; if
            set to ``False``, it runs without the experimental warning.
Returns:
func: wrapped function, called on exporting.
"""
def _wrapper(*args, **kwargs):
inputs = []
attributes = {}
rename_attr_dict = {}
if rename_attributes is not None:
rename_attr_dict = {attr[0]: attr[1] for attr in rename_attributes}
# resolve default value for kwargs
arg_spec = inspect.signature(alt_func)
bound = arg_spec.bind(*args, **kwargs)
bound.apply_defaults()
# default values are set on `bound.arguments`, but cannot get them
# from `bound.kwargs`
for i, (k, v) in enumerate(bound.arguments.items()):
if i < len(args):
continue
kwargs[k] = v
def set_attr(key, value):
default_name = key if isinstance(key, str) else 'arg{}'.format(key)
attributes[rename_attr_dict.get(key, default_name)] = value
def expand_args(args_iter):
for i, a in args_iter:
if _is_var(a):
inputs.append(a)
elif isinstance(a, (tuple, list)):
# all elements are variable -> add flatten them to inputs
# all elements are not variable -> add them to attributes
# mixed variable and other type value -> error
flatten_arg = _flatten(a)
var_or_not = map(_is_var, flatten_arg)
if all(var_or_not):
inputs.extend(flatten_arg)
elif not any(var_or_not):
set_attr(i, a)
else:
raise ValueError(
'arguments mixed variable and other type are not '
'supported')
else:
set_attr(i, a)
expand_args(enumerate(args))
expand_args(kwargs.items())
if not inputs:
raise ValueError(
'arguments of the function wrapped by \'as_funcnode\' '
'must include at least one chainer.Variable, function name: '
'{}'.format(name))
wrapped = WrappedFunctionNode(
name, alt_func, args, kwargs, inputs, attributes=attributes)
ret = wrapped.apply(inputs)
return wrapped.reconstruct_return_value(ret)
if experimental_warning:
chainer.utils.experimental('as_funcnode')
return _wrapper
def as_funcnode(name, rename_attributes=None):
"""The target function fakes FunctionNode
    The target function is wrapped to connect variable nodes by acting as a
    function node. Expected to be used as a decorator. For more detail, see
    the ``fake_as_funcnode`` documentation.
Example:
>>> @onnx_chainer.replace_func.as_funcnode(
... 'CustomNode', rename_attributes=[(1, 'value'), ('c', 'y')])
... def func(x, a, b, c=1, d=2): pass
Args:
        name (str): function name. This name is used to determine which ONNX
            operator is assigned.
rename_attributes (list or tuple): rename attribute name, set list
of ``tuple(index_of_args, new_name)`` or
``tuple(kwargs_name, new_name)``
"""
def _wrapper(fn):
return fake_as_funcnode(fn, name, rename_attributes=rename_attributes)
return _wrapper
def _unwrap_var(var):
return var.array if _is_var(var) else var
def _is_var(array):
# alias for type checking
return isinstance(array, chainer.Variable)
def _is_array(v):
return not isinstance(v, (list, tuple))
def _flatten(xs):
if _is_array(xs):
return [xs]
o = []
for x in xs:
if _is_array(x):
o.append(x)
else:
o.extend(_flatten(x))
return o
| 9,133
| 35.390438
| 79
|
py
|
chainer
|
chainer-master/onnx_chainer/__init__.py
|
from onnx_chainer.export import convert_parameter # NOQA
from onnx_chainer.export import export # NOQA
from onnx_chainer.export import MAXIMUM_OPSET_VERSION # NOQA
from onnx_chainer.export import MINIMUM_OPSET_VERSION # NOQA
from onnx_chainer.export_testcase import export_testcase # NOQA
| 296
| 36.125
| 64
|
py
|
chainer
|
chainer-master/onnx_chainer/onnx_helper.py
|
import onnx
__func_name = None  # not required to be unique across the whole graph
def set_func_name(func_name):
"""Set the name of Chainer function being converted.
Args:
func_name (str): The name of Chainer function.
"""
global __func_name
__func_name = func_name
def get_func_name():
"""Return processing function name
"""
assert __func_name is not None
return __func_name
def make_node(*args, **kwargs):
"""A thin wrapper of `onnx.helper.make_node`.
Node name will be assigned automatically.
Args:
*args (tuple): ONNX node parameters of the node
**kwargs (dict): ONNX attributes of the node.
Returns:
An `onnx.NodeProto` object.
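    A minimal sketch; the function name must be set beforehand and the
    input/output names below are illustrative:
    >>> set_func_name('Tanh_0')
    >>> node = make_node('Tanh', ['x'], ['y'])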
"""
return onnx.helper.make_node(*args, name=get_func_name(), **kwargs)
class GraphBuilder(object):
"""A helper class to build consecutive ONNX nodes."""
def __init__(self):
self._nodes = []
self._func_name = get_func_name()
def node_name(self):
return '{}_tmp_{}'.format(self._func_name, len(self._nodes))
def op(self, op_name, input_names, num_outputs=1, **kwargs):
"""Creates a new ONNX node and returns its outputs.
Args:
op_name (str): The name of an ONNX op.
input_names (list of str): The names of input values.
num_outputs (int): The number of output values.
**kwargs (dict): ONNX attributes of the node.
Returns:
A str of the output name when `num_outputs` is 1.
A tuple of str of the output names otherwise.
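        A small sketch of chaining ops inside a converter; the names below
        are illustrative and the current function name must be set
        beforehand with ``set_func_name``:
        >>> gb = GraphBuilder()
        >>> h = gb.op('Transpose', ['x'], perm=[0, 2, 1])
        >>> out = gb.op('Softmax', [h], axis=2)
        >>> nodes = gb.nodes(output_names=['y'])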
"""
if num_outputs == 1:
output_names = [self.node_name()]
else:
output_names = ['{}_{}'.format(self.node_name(), i) for
i in range(num_outputs)]
return self.op_output_named(
op_name, input_names, output_names, **kwargs)
def op_output_named(
self, op_name, input_names, output_names, **kwargs):
"""Creates a new ONNX node with output names, and returns its outputs.
Args:
op_name (str): The name of an ONNX op.
input_names (list of str): The names of input values.
            output_names (list of str): The names of output values.
**kwargs (dict): ONNX attributes of the node.
Returns:
A str of the output name when number of output is 1.
A tuple of str of the output names otherwise.
"""
# Prevent a common mistake. `input_names="input"` creates a
# node with 5 inputs.
assert not isinstance(input_names, str)
node = onnx.helper.make_node(
op_name, input_names, output_names, name=self.node_name(),
**kwargs)
self._nodes.append(node)
if len(output_names) == 1:
return node.output[0]
else:
return tuple(node.output)
def nodes(self, output_names=None):
"""Returns all nodes created so far.
Args:
output_names (list of str): The names of output values to be set at
the last node.
Returns:
A list of `onnx.NodeProto` objects, suitable as the return
value of converter functions.
"""
if output_names is not None:
assert len(self._nodes[-1].output) == len(output_names)
self._nodes[-1].output[:] = output_names
return tuple(self._nodes)
def write_tensor_pb(filename, name, value):
with open(filename, 'wb') as f:
t = onnx.numpy_helper.from_array(value, name)
f.write(t.SerializeToString())
def cleanse_param_name(name):
"""Converts Chainer parameter names to ONNX names.
Note ONNX identifiers must be a valid C identifier.
Args:
name (str): A Chainer parameter name (e.g., /l/W).
    Returns:
A valid ONNX name (e.g., param_l_W).
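    A quick check with a hypothetical parameter path:
    >>> cleanse_param_name('/l1/W')
    'param_l1_W'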
"""
return 'param' + name.replace('/', '_')
def is_support_non_standard_domain():
# from ONNX 1.5, skip schema check on ops in non-standard domain
# see: https://github.com/onnx/onnx/pull/1876
    # this check expects that onnx follows semantic versioning
versions = onnx.__version__.split('.')
if len(versions) < 2 or (not versions[1].isdecimal()):
raise RuntimeError(
            'ONNX-Chainer cannot get major and minor versions of the ONNX '
            'module: {}'.format(onnx.__version__))
major, minor = versions[0], versions[1]
return major == '1' and int(minor) >= 5
| 4,521
| 30.402778
| 79
|
py
|
chainer
|
chainer-master/onnx_chainer/export_testcase.py
|
import os
import warnings
import chainer
from onnx_chainer.export import _available
if _available:
from onnx_chainer.export import export
from onnx_chainer.onnx_helper import cleanse_param_name
from onnx_chainer.onnx_helper import write_tensor_pb
def export_testcase(model, args, out_dir, output_grad=False, **kwargs):
"""Export model and I/O tensors of the model in protobuf format.
    Similar to the `export` function, this function first performs a forward
    computation on a given input to obtain an output. Then, this function
    saves the pair of input and output in Protobuf format, which is the
    de facto standard format in ONNX.
This function also saves the model with the name "model.onnx".
Args:
model (~chainer.Chain): The model object.
args (list): The arguments which are given to the model
directly. Unlike `export` function, only `list` type is accepted.
out_dir (str): The directory name used for saving the input and output.
output_grad (bool): If True, this function will output model's
gradient with names 'gradient_%d.pb'.
**kwargs (dict): keyword arguments for ``onnx_chainer.export``.
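    A minimal sketch; the model and the dummy input below are placeholders:
    >>> import numpy as np
    >>> model = chainer.Sequential(chainer.functions.relu)
    >>> x = np.zeros((1, 10), dtype=np.float32)
    >>> export_testcase(model, [x], 'out_dir')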
"""
os.makedirs(out_dir, exist_ok=True)
model.cleargrads()
onnx_model, inputs, outputs = export(
model, args, filename=os.path.join(out_dir, 'model.onnx'),
return_named_inout=True, no_testcase=True, **kwargs)
test_data_dir = os.path.join(out_dir, 'test_data_set_0')
os.makedirs(test_data_dir, exist_ok=True)
for i, (name, var) in enumerate(inputs.items()):
pb_name = os.path.join(test_data_dir, 'input_{}.pb'.format(i))
array = chainer.cuda.to_cpu(var.array)
write_tensor_pb(pb_name, name, array)
for i, (name, var) in enumerate(outputs.items()):
pb_name = os.path.join(test_data_dir, 'output_{}.pb'.format(i))
array = chainer.cuda.to_cpu(var.array)
write_tensor_pb(pb_name, name, array)
if output_grad:
# Perform backward computation
if len(outputs) > 1:
outputs = chainer.functions.identity(*outputs)
for out in outputs.values():
out.grad = model.xp.ones_like(out.array)
list(outputs.values())[0].backward()
for i, (name, param) in enumerate(model.namedparams()):
pb_name = os.path.join(test_data_dir, 'gradient_{}.pb'.format(i))
grad = chainer.cuda.to_cpu(param.grad)
onnx_name = cleanse_param_name(name)
if grad is None:
warnings.warn(
'Parameter `{}` does not have gradient value'.format(name))
else:
write_tensor_pb(pb_name, onnx_name, grad)
| 2,741
| 38.73913
| 79
|
py
|
chainer
|
chainer-master/onnx_chainer/mapping.py
|
from contextlib import contextmanager
import chainer.functions as F
from onnx_chainer import functions
from onnx_chainer.functions.converter import FunctionConverter
from onnx_chainer.replace_func import fake_as_funcnode
_supported_function_node_set = {
# Activation
'ClippedReLU',
'ELU',
'HardSigmoid',
'LeakyReLU',
'LogSoftmax',
'PReLUFunction',
'ReLU',
'Selu',
'Sigmoid',
'Softmax',
'Softplus',
'Tanh',
# Array
'Cast',
'Concat',
'Copy',
'Depth2Space',
'Dstack',
'ExpandDims',
'GetItem',
'Hstack',
'Moveaxis',
'Pad',
'Permutate',
'Repeat',
'Reshape',
'ResizeImages',
'Rollaxis',
'SelectItem',
'Separate',
'Shape',
'Space2Depth',
'SplitAxis',
'Squeeze',
'Stack',
'Swapaxes',
'Tile',
'Transpose',
'TransposeSequence',
'Vstack',
'Where',
# Connection
'Convolution2DFunction',
'ConvolutionND',
'Deconvolution2DFunction',
'DeconvolutionND',
'EmbedIDFunction',
'LinearFunction',
# Loss
'SoftmaxCrossEntropy',
# Math
'Absolute',
'Add',
'AddConstant',
'Arccos',
'Arcsin',
'Arctan',
'ArgMax',
'ArgMin',
'BroadcastTo',
'Clip',
'Cos',
'Cosh',
'Div',
'DivFromConstant',
'Exp',
'Identity',
'LinearInterpolate',
'Log',
'LogSumExp',
'MatMul',
'Max',
'Maximum',
'Mean',
'Min',
'Minimum',
'Mul',
'MulConstant',
'Neg',
'PowConstVar',
'PowVarConst',
'PowVarVar',
'Prod',
'RsqrtGPU',
'sign',
'Sin',
'Sinh',
'Sqrt',
'Square',
'Sub',
'SubFromConstant',
'Sum',
'Tan',
# Noise
'Dropout',
# Normalization
'BatchNormalization',
'FixedBatchNormalization',
'GroupNormalization',
'LocalResponseNormalization',
'NormalizeL2',
# Pooling
'AveragePooling2D',
'AveragePoolingND',
'MaxPooling2D',
'MaxPoolingND',
'ROIPooling2D',
'Unpooling2D',
# RNN
'n_step_gru',
}
_converters = None
def _get_converters():
global _converters
if _converters is not None:
return _converters
_converters = {
name: FunctionConverter(getattr(functions, 'convert_'+name, None))
for name in _supported_function_node_set}
return _converters
converters = _get_converters()
_supported_function_set = {
# Math
(F, 'sign'),
# RNN
(F, 'n_step_gru'),
(F.rnn.n_step_gru, 'n_step_gru'),
}
@contextmanager
def patch_functions():
org_funcs = {}
for mod, name in _supported_function_set:
org_func = getattr(mod, name)
org_funcs[(mod, name)] = org_func
setattr(mod, name, fake_as_funcnode(
org_func, name, experimental_warning=False))
try:
yield
finally:
for mod, name in _supported_function_set:
setattr(mod, name, org_funcs[(mod, name)])
| 2,980
| 16.231214
| 74
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/pooling.py
|
import warnings
from chainer.utils import conv
import numpy as np
from onnx_chainer.functions.opset_version import support
from onnx_chainer import onnx_helper
@support((1, 7))
def convert_AveragePooling2D(
func, opset_version, input_names, output_names, context):
pad = [func.ph, func.pw]
stride = [func.sy, func.sx]
ksize = [func.kh, func.kw]
if func.cover_all:
# Supports cover_all by setting extra padding
# NOTE: onnxruntime may not run when "k <= p + s - 1".
pad.extend([p + s - 1 for p, s in zip(pad, stride)])
else:
pad = pad * 2
if opset_version == 1:
raise ValueError(
'AveragePooling2D is not compatible with ONNX\'s AveragePool-1. '
'Use operation set version >= 7.')
elif opset_version == 7:
return onnx_helper.make_node(
'AveragePool', input_names, output_names,
kernel_shape=ksize,
pads=pad,
strides=stride,
count_include_pad=1,
),
@support((1, 7))
def convert_AveragePoolingND(
func, opset_version, input_names, output_names, context):
pad = list(func.pad[:])
if func.cover_all:
# Supports cover_all by setting extra padding
# NOTE: onnxruntime may not run when "k <= p + s - 1".
pad.extend([p + s - 1 for p, s in zip(pad, func.stride)])
else:
pad = pad * 2
if opset_version == 1:
raise ValueError(
'AveragePoolingND is not compatible with ONNX\'s AveragePool-1. '
'Use operation set version >= 7.')
elif opset_version == 7:
return onnx_helper.make_node(
'AveragePool', input_names, output_names,
kernel_shape=func.ksize,
pads=pad,
strides=func.stride,
count_include_pad=1,
),
@support((1, 8, 11))
def convert_MaxPooling2D(
func, opset_version, input_names, output_names, context):
pad = [func.ph, func.pw]
stride = [func.sy, func.sx]
ksize = [func.kh, func.kw]
attrs = {}
if func.cover_all:
if opset_version < 11:
# Supports cover_all by setting extra padding
# NOTE: onnxruntime may not run when "k <= p + s - 1".
            pad.extend([p + s - 1 for p, s in zip(pad, stride)])
else:
pad = pad * 2
attrs['ceil_mode'] = 1
else:
pad = pad * 2
if opset_version == 1:
return onnx_helper.make_node(
'MaxPool', input_names, output_names,
kernel_shape=ksize,
pads=pad,
strides=stride
),
elif opset_version >= 8:
return onnx_helper.make_node(
'MaxPool', input_names, output_names,
kernel_shape=ksize,
pads=pad,
strides=stride,
storage_order=0, # row major
**attrs,
),
@support((1, 8, 11))
def convert_MaxPoolingND(
func, opset_version, input_names, output_names, context):
pad = list(func.pad[:])
attrs = {}
if func.cover_all:
if opset_version < 11:
# Supports cover_all by setting extra padding
# NOTE: onnxruntime may not run when "k <= p + s - 1".
pad.extend([p + s - 1 for p, s in zip(pad, func.stride)])
else:
pad = pad * 2
attrs['ceil_mode'] = 1
else:
pad = pad * 2
if opset_version == 1:
return onnx_helper.make_node(
'MaxPool', input_names, output_names,
kernel_shape=func.ksize,
pads=pad,
strides=func.stride
),
elif opset_version >= 8:
return onnx_helper.make_node(
'MaxPool', input_names, output_names,
kernel_shape=func.ksize,
pads=pad,
strides=func.stride,
storage_order=0, # row major
**attrs,
),
def convert_ROIPooling2D(
func, opset_version, input_names, output_names, context):
warnings.warn(
        'It\'s possible that the output does not match Chainer\'s; please '
        'check each runtime\'s implementation. For example, when input x has '
        'negative values, some runtimes set max(output, 0) unlike Chainer.',
UserWarning)
return onnx_helper.make_node(
'MaxRoiPool', input_names, output_names,
pooled_shape=[func.outh, func.outw],
spatial_scale=func.spatial_scale,
),
@support((7, 9, 10, 11))
def convert_Unpooling2D(
func, opset_version, input_names, output_names, context):
pad = [func.ph, func.pw]
stride = [func.sy, func.sx]
ksize = [func.kh, func.kw]
outsize = [func.outh, func.outw]
# TODO(hamaji): These could be implemented by `Slice` and `Pad`.
h, w = func.inputs[0].shape[2:]
expected_outsize = [
conv.get_deconv_outsize(
h, func.kh, func.sy, func.ph, cover_all=func.cover_all),
conv.get_deconv_outsize(
w, func.kw, func.sx, func.pw, cover_all=func.cover_all)
]
if outsize != expected_outsize:
raise RuntimeError('ONNX-chainer does not support `outsize!=None` '
'for Unpooling2D: expected={} actual={}'.format(
expected_outsize, outsize))
if pad != [0, 0]:
raise RuntimeError('ONNX-chainer does not support `pad!=0` '
'for Unpooling2D')
# This one would require an extra 1x1 MaxPool.
if stride != ksize:
raise RuntimeError('ONNX-chainer does not support `stride!=ksize` '
'for Unpooling2D: stride={} ksize={}'.format(
stride, ksize))
if func.cover_all:
uncovered_outsize = [
conv.get_deconv_outsize(
h, func.kh, func.sy, func.ph, cover_all=False),
conv.get_deconv_outsize(
w, func.kw, func.sx, func.pw, cover_all=False)
]
scales = [
1.0, 1.0,
func.kh * outsize[0] / uncovered_outsize[0],
func.kw * outsize[1] / uncovered_outsize[1],
]
else:
scales = [1.0, 1.0, float(func.kh), float(func.kw)]
if opset_version == 7:
return onnx_helper.make_node('Upsample', input_names, output_names,
scales=scales),
scales_name = context.add_const(
np.array(scales, dtype=np.float32), 'scales')
if opset_version in [9, 10]:
input_names.append(scales_name)
op = 'Upsample' if opset_version == 9 else 'Resize'
return onnx_helper.make_node(op, input_names, output_names),
if opset_version == 11:
roi_name = context.add_const(np.array([]), 'roi')
input_names.extend([roi_name, scales_name])
return onnx_helper.make_node('Resize', input_names, output_names),
| 6,863
| 33.149254
| 79
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/activation.py
|
import numpy as np
from onnx_chainer.functions.opset_version import support
from onnx_chainer import onnx_helper
def _convert_softmax_impl(op_type, func, input_names, output_names):
axis = func.axis
ndim = len(func.inputs[0].shape)
if axis == ndim - 1:
return onnx_helper.make_node(
op_type, input_names, output_names,
axis=axis
),
# Chainer's softmax computes the softmax along a single axis while
# ONNX's computes along the specified axis and all axes after the
# specified axis. To emulate Chainer's by ONNX's, we transpose the
# single specified axis to the last axis, compute the softmax, and
# transpose back to the original shape.
gb = onnx_helper.GraphBuilder()
perm = list(range(ndim))
perm[axis], perm[-1] = perm[-1], perm[axis]
transposed = gb.op('Transpose', input_names, perm=perm)
softmaxed = gb.op(op_type, [transposed], axis=ndim - 1)
gb.op('Transpose', [softmaxed], perm=perm)
return gb.nodes(output_names=output_names)
@support((1, 6, 11))
def convert_ClippedReLU(
func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Clip', input_names, output_names,
min=0.0, max=func.cap,
consumed_inputs=[1]
),
elif opset_version == 6:
return onnx_helper.make_node(
'Clip', input_names, output_names,
min=0.0, max=func.cap,
),
elif opset_version == 11:
min_name = context.add_const(np.array(0, dtype=np.float32), 'zero')
max_name = context.add_const(
np.array(func.cap, dtype=np.float32), 'clip_z')
input_names.extend([min_name, max_name])
return onnx_helper.make_node('Clip', input_names, output_names),
@support((1, 6))
def convert_ELU(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Elu', input_names, output_names,
alpha=func.alpha,
),
elif opset_version == 6:
return onnx_helper.make_node(
'Elu', input_names, output_names,
alpha=func.alpha
),
@support((1, 6))
def convert_HardSigmoid(
func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'HardSigmoid', input_names, output_names,
alpha=0.2,
beta=0.5,
consumed_inputs=[1],
),
elif opset_version == 6:
return onnx_helper.make_node(
'HardSigmoid', input_names, output_names,
alpha=0.2,
beta=0.5
),
@support((1, 6))
def convert_LeakyReLU(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'LeakyRelu', input_names, output_names,
alpha=func.slope,
consumed_inputs=[1],
),
elif opset_version == 6:
return onnx_helper.make_node(
'LeakyRelu', input_names, output_names,
alpha=func.slope
),
def convert_LogSoftmax(
func, opset_version, input_names, output_names, context):
return _convert_softmax_impl('LogSoftmax', func, input_names, output_names)
@support((1, 6, 7))
def convert_PReLUFunction(
func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'PRelu', input_names, output_names, consumed_inputs=[1]),
elif opset_version == 6:
return onnx_helper.make_node('PRelu', input_names, output_names),
elif opset_version == 7:
return onnx_helper.make_node('PRelu', input_names, output_names),
@support((1, 6))
def convert_ReLU(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Relu', input_names, output_names,
consumed_inputs=[1]),
elif opset_version == 6:
return onnx_helper.make_node('Relu', input_names, output_names),
@support((1, 6))
def convert_Selu(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Selu', input_names, output_names,
consumed_inputs=[1],
alpha=func.alpha,
gamma=func.scale
),
elif opset_version == 6:
return onnx_helper.make_node('Selu', input_names, output_names,
alpha=func.alpha,
gamma=func.scale
),
@support((1, 6))
def convert_Sigmoid(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Sigmoid', input_names, output_names,
consumed_inputs=[1]),
elif opset_version == 6:
return onnx_helper.make_node('Sigmoid', input_names, output_names),
def convert_Softmax(func, opset_version, input_names, output_names, context):
return _convert_softmax_impl('Softmax', func, input_names, output_names)
def convert_Softplus(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Softplus', input_names, output_names),
@support((1, 6))
def convert_Tanh(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Tanh', input_names, output_names,
consumed_inputs=[1]),
elif opset_version == 6:
return onnx_helper.make_node('Tanh', input_names, output_names),
| 5,706
| 32.970238
| 79
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/math.py
|
import numpy as np
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
from onnx_chainer.functions.opset_version import support
from onnx_chainer import onnx_helper
@support((1, 6, 7))
def convert_Add(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Add', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6 or opset_version == 7:
return onnx_helper.make_node('Add', input_names, output_names),
@support((1, 6, 7))
def convert_AddConstant(
func, opset_version, input_names, output_names, context):
value_name = context.add_const(
np.array(func.value, dtype=func.inputs[0].dtype), 'value')
input_names.append(value_name)
if opset_version == 1:
return onnx_helper.make_node(
'Add', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6 or opset_version == 7:
return onnx_helper.make_node('Add', input_names, output_names),
@support((1, 6, 7))
def convert_Sub(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Sub', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6 or opset_version == 7:
return onnx_helper.make_node('Sub', input_names, output_names),
@support((1, 6, 7))
def convert_SubFromConstant(
func, opset_version, input_names, output_names, context):
value_name = context.add_const(
np.array(func.value, dtype=func.inputs[0].dtype), 'value')
input_names[:0] = [value_name]
if opset_version == 1:
return onnx_helper.make_node(
'Sub', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6 or opset_version == 7:
return onnx_helper.make_node('Sub', input_names, output_names),
@support((1, 6, 7))
def convert_Mul(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Mul', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6 or opset_version == 7:
return onnx_helper.make_node('Mul', input_names, output_names),
@support((1, 6, 7))
def convert_MulConstant(
func, opset_version, input_names, output_names, context):
value_name = context.add_const(
np.array(func.value, dtype=func.inputs[0].dtype), 'value')
input_names.append(value_name)
if opset_version == 1:
return onnx_helper.make_node(
'Mul', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6 or opset_version == 7:
return onnx_helper.make_node('Mul', input_names, output_names),
@support((1, 6))
def convert_Neg(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Neg', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6:
return onnx_helper.make_node('Neg', input_names, output_names),
@support((1, 6, 7))
def convert_Div(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Div', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6 or opset_version == 7:
return onnx_helper.make_node('Div', input_names, output_names),
@support((1, 6, 7))
def convert_DivFromConstant(
func, opset_version, input_names, output_names, context):
value_name = context.add_const(
np.array(func.value, dtype=func.inputs[0].dtype), 'value')
input_names[:0] = [value_name]
if opset_version == 1:
return onnx_helper.make_node(
'Div', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6 or opset_version == 7:
return onnx_helper.make_node('Div', input_names, output_names),
@support((1, 6))
def convert_Absolute(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Abs', input_names, output_names, consumed_inputs=[1]),
elif opset_version == 6:
return onnx_helper.make_node('Abs', input_names, output_names),
@support((7,))
def convert_Arccos(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Acos', input_names, output_names),
@support((7,))
def convert_Arcsin(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Asin', input_names, output_names),
@support((7,))
def convert_Arctan(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Atan', input_names, output_names),
@support((1, 7))
def convert_PowConstVar(
func, opset_version, input_names, output_names, context):
value_name = context.add_const(
np.array(func.value, dtype=func.inputs[0].dtype), 'value')
input_names.insert(0, value_name)
if opset_version == 1 or opset_version == 7:
return onnx_helper.make_node('Pow', input_names, output_names),
@support((1, 7))
def convert_PowVarConst(
func, opset_version, input_names, output_names, context):
value_name = context.add_const(
np.array(func.value, dtype=func.inputs[0].dtype), 'value')
input_names.append(value_name)
if opset_version == 1 or opset_version == 7:
return onnx_helper.make_node('Pow', input_names, output_names),
@support((1, 7))
def convert_PowVarVar(
func, opset_version, input_names, output_names, context):
if opset_version == 1 or opset_version == 7:
return onnx_helper.make_node('Pow', input_names, output_names),
@support((1, 6, 11))
def convert_Clip(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Clip', input_names, output_names,
max=func.x_max,
min=func.x_min,
consumed_inputs=[1]
),
elif opset_version == 6:
return onnx_helper.make_node(
'Clip', input_names, output_names,
max=func.x_max,
min=func.x_min,
),
elif opset_version == 11:
min_name = context.add_const(
np.array(func.x_min, dtype=np.float32), 'x_min')
max_name = context.add_const(
np.array(func.x_max, dtype=np.float32), 'x_max')
input_names.extend([min_name, max_name])
return onnx_helper.make_node('Clip', input_names, output_names),
@support((7,))
def convert_Cos(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Cos', input_names, output_names),
@support((9,))
def convert_Cosh(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Cosh', input_names, output_names),
@support((1, 6))
def convert_Exp(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Exp', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6:
return onnx_helper.make_node('Exp', input_names, output_names),
def convert_Identity(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Identity', input_names, output_names),
def convert_MatMul(func, opset_version, input_names, output_names, context):
ndim_a = len(func.inputs[0].shape)
ndim_b = len(func.inputs[1].shape)
gb = onnx_helper.GraphBuilder()
if ndim_a > 1 and func.transa:
perm = list(range(ndim_a))
perm[-1], perm[-2] = perm[-2], perm[-1]
input_names[0] = gb.op('Transpose', [input_names[0]], perm=perm)
if ndim_b > 1 and func.transb:
perm = list(range(ndim_b))
perm[-1], perm[-2] = perm[-2], perm[-1]
input_names[1] = gb.op('Transpose', [input_names[1]], perm=perm)
gb.op('MatMul', input_names)
return gb.nodes(output_names)
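# A rough NumPy sketch of the transpose handling above (illustrative only,
# not used by the exporter): swapping the last two entries of `perm`
# transposes the matrix axes, which is what `transa`/`transb` request.
def _matmul_transpose_sketch(a, b, transa=False, transb=False):
    # Mirrors the converter: only transpose operands that are matrices.
    if transa and a.ndim > 1:
        perm = list(range(a.ndim))
        perm[-1], perm[-2] = perm[-2], perm[-1]
        a = np.transpose(a, perm)
    if transb and b.ndim > 1:
        perm = list(range(b.ndim))
        perm[-1], perm[-2] = perm[-2], perm[-1]
        b = np.transpose(b, perm)
    return np.matmul(a, b)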
@support((1, 6, 8))
def convert_Maximum(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Max', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6 or opset_version == 8:
return onnx_helper.make_node('Max', input_names, output_names),
@support((1, 6, 8))
def convert_Minimum(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Min', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6 or opset_version == 8:
return onnx_helper.make_node('Min', input_names, output_names),
@support((7,))
def convert_Sin(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Sin', input_names, output_names),
@support((9,))
def convert_Sinh(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Sinh', input_names, output_names),
@support((1, 6))
def convert_Sqrt(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Sqrt', input_names, output_names, consumed_inputs=[1, 1]),
elif opset_version == 6:
return onnx_helper.make_node('Sqrt', input_names, output_names),
def convert_RsqrtGPU(func, opset_version, input_names, output_names, context):
gb = onnx_helper.GraphBuilder()
sqrt_out = gb.op('Sqrt', input_names)
gb.op('Reciprocal', [sqrt_out])
return gb.nodes(output_names)
@support((6,))
def convert_Log(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Log', input_names, output_names),
def convert_LogSumExp(func, opset_version, input_names, output_names, context):
    # Use keepdims=False by default since Chainer does not support the
    # keepdims option
kwargs = {'keepdims': False}
if hasattr(func, 'keepdims'):
kwargs['keepdims'] = func.keepdims
if func.axis is not None:
kwargs['axes'] = func.axis
return onnx_helper.make_node(
'ReduceLogSumExp', input_names, output_names, **kwargs),
def convert_Max(func, opset_version, input_names, output_names, context):
kwargs = {'keepdims': func.keepdims}
if func.axis is not None:
kwargs['axes'] = func.axis
return onnx_helper.make_node(
'ReduceMax', input_names, output_names, **kwargs),
def convert_Mean(func, opset_version, input_names, output_names, context):
kwargs = {'keepdims': func.keepdims}
if func.axis is not None:
kwargs['axes'] = func.axis
return onnx_helper.make_node(
'ReduceMean', input_names, output_names, **kwargs),
def convert_Min(func, opset_version, input_names, output_names, context):
kwargs = {'keepdims': func.keepdims}
if func.axis is not None:
kwargs['axes'] = func.axis
return onnx_helper.make_node(
'ReduceMin', input_names, output_names, **kwargs),
def convert_Prod(func, opset_version, input_names, output_names, context):
kwargs = {'keepdims': func.keepdims}
if func.axis is not None:
kwargs['axes'] = func.axis
return onnx_helper.make_node(
'ReduceProd', input_names, output_names, **kwargs),
def convert_Sum(func, opset_version, input_names, output_names, context):
kwargs = {'keepdims': func.keepdims}
if func.axis is not None:
kwargs['axes'] = func.axis
return onnx_helper.make_node(
'ReduceSum', input_names, output_names, **kwargs),
@support((7,))
def convert_Tan(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Tan', input_names, output_names),
@support((1, 6, 7))
def convert_LinearInterpolate(
func, opset_version, input_names, output_names, context):
typ = func.inputs[0].dtype if isinstance(
func.inputs[0].dtype, np.dtype) else np.dtype(func.inputs[0].dtype)
one_name = context.add_const(np.array(1, dtype=typ), 'one')
kwargs = {'consumed_inputs': [1, 1]} if opset_version == 1 else {}
kwargs2 = {} if opset_version >= 7 else {'broadcast': 1}
gb = onnx_helper.GraphBuilder()
p, x, y = input_names
n1 = gb.op('Sub', [one_name, p], **kwargs, **kwargs2)
n2 = gb.op('Mul', [p, x], **kwargs)
n3 = gb.op('Mul', [n1, y], **kwargs)
gb.op_output_named('Add', [n2, n3], output_names, **kwargs)
return gb.nodes()
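# The node graph above decomposes linear interpolation into Sub/Mul/Add:
#     out = p * x + (1 - p) * y
# A minimal NumPy sketch of the same computation (illustrative only):
def _linear_interpolate_sketch(p, x, y):
    return p * x + (1 - p) * y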
@support((1, 6, 7))
def convert_Square(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Mul', [input_names[0], input_names[0]], output_names,
consumed_inputs=[1, 1]),
elif opset_version == 6 or opset_version == 7:
return onnx_helper.make_node(
'Mul', [input_names[0], input_names[0]], output_names),
@support((8,))
def convert_BroadcastTo(
func, opset_version, input_names, output_names, context):
shape_name = context.add_const(
np.array(func._shape, dtype=np.int64), 'shape')
input_names.append(shape_name)
return onnx_helper.make_node('Expand', input_names, output_names),
def _argminmax_nodes(op_name, func, input_names, output_names, context):
gb = onnx_helper.GraphBuilder()
target_input_names = input_names
axis = func.axis
if axis is None:
shape_name = context.add_const(np.array([-1], dtype=np.int64), 'shape')
input_names.append(shape_name)
target_input_names = [gb.op('Reshape', input_names)]
axis = 0
out = gb.op(op_name, target_input_names, axis=axis, keepdims=0)
    # Chainer's ArgMax always returns values as int32.
    # The Cast spec changed in opset 6; this logic does not support opset 5
    # and earlier.
gb.op('Cast', [out], to=NP_TYPE_TO_TENSOR_TYPE[np.dtype('int32')])
return gb.nodes(output_names)
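# A NumPy sketch of what the node sequence above computes (illustrative
# only): with axis=None the input is flattened first, and the result is
# cast to int32 to match Chainer's argmax/argmin output dtype.
def _argminmax_sketch(x, axis=None, op=np.argmax):
    if axis is None:
        return op(x.reshape(-1)).astype(np.int32)
    return op(x, axis=axis).astype(np.int32)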
@support((6,))
def convert_ArgMax(func, opset_version, input_names, output_names, context):
return _argminmax_nodes('ArgMax', func, input_names, output_names, context)
@support((6,))
def convert_ArgMin(func, opset_version, input_names, output_names, context):
return _argminmax_nodes('ArgMin', func, input_names, output_names, context)
@support((9,))
def convert_sign(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Sign', input_names, output_names),
| 14,447
| 34.940299
| 79
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/array.py
|
import warnings
import chainer
import numpy as np
import onnx
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
from onnx_chainer.functions.opset_version import support
from onnx_chainer import onnx_helper
TENSOR_TYPE_TO_NAME = {
0: 'UNDEFINED',
1: 'FLOAT',
2: 'UINT8',
3: 'INT8',
4: 'UINT16',
5: 'INT16',
6: 'INT32',
7: 'INT64',
8: 'STRING',
9: 'BOOL',
10: 'FLOAT16',
11: 'DOUBLE',
12: 'UINT32',
13: 'UINT64',
14: 'COMPLEX64',
15: 'COMPLEX128',
}
@support((1, 6))
def convert_Cast(func, opset_version, input_names, output_names, context):
typ = func.type if isinstance(func.type, np.dtype) else np.dtype(func.type)
if opset_version == 1:
return onnx_helper.make_node(
'Cast', input_names, output_names,
to=TENSOR_TYPE_TO_NAME[NP_TYPE_TO_TENSOR_TYPE[typ]]
),
elif opset_version == 6:
return onnx_helper.make_node(
'Cast', input_names, output_names,
to=NP_TYPE_TO_TENSOR_TYPE[typ]
),
@support((1, 4))
def convert_Concat(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Concat', input_names, output_names,
axis=func.axis
),
elif opset_version == 4:
return onnx_helper.make_node(
'Concat', input_names, output_names,
axis=func.axis
),
def convert_Copy(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node(
'Identity', input_names, output_names
),
def convert_Depth2Space(
func, opset_version, input_names, output_names, context):
return onnx_helper.make_node(
'DepthToSpace', input_names, output_names,
blocksize=func.r
),
def get_slice_node(
gb, opset_version, context, input_names, axes, starts, ends, steps):
if opset_version < 11 and any([i != 1 for i in steps]):
raise ValueError(
'GetItem with n-step slicing is supported from opset11, '
'opset{} is not supported'.format(opset_version))
if opset_version < 10:
return gb.op(
'Slice', input_names, axes=axes, starts=starts, ends=ends)
else:
inputs = [('starts', starts), ('ends', ends), ('axes', axes)]
if opset_version > 10:
inputs.append(('steps', steps))
for name, values in inputs:
param_name = context.add_const(
np.asarray(list(values), dtype=np.int64), name)
input_names.append(param_name)
return gb.op('Slice', input_names)
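# For reference, a NumPy sketch of the basic slicing the emitted Slice node
# performs (illustrative only; the int64 sentinel values used above for
# open-ended starts/ends are not modeled here):
def _slice_sketch(x, axes, starts, ends, steps):
    slices = [slice(None)] * x.ndim
    for axis, start, end, step in zip(axes, starts, ends, steps):
        slices[axis] = slice(start, end, step)
    return x[tuple(slices)]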
def _to_ndarray(x, dtype=np.int64):
if isinstance(x, list):
return np.array(x, dtype=dtype)
else:
return chainer.cuda.to_cpu(x).astype(dtype)
@support((1, 10, 11))
def convert_GetItem(func, opset_version, input_names, output_names, context):
x = func.inputs[0]
axes, starts, ends, steps = [], [], [], []
squeeze_idxs, unsqueeze_idxs = [], []
    skipped = 0  # when an Ellipsis is given, index rolling has to be skipped
    prev_gathered_axis = -1
    gather_axis = -1
    gather_idx = None  # for GatherND, the first index array for broadcasting
    gather_nd_idx = None
    is_used_slice_whole = False  # GatherND does not support axis, needs care
for i, idx in enumerate(func.slices):
# axis means the index of input x, adjust None and Ellipsis counts
axis = i - len(unsqueeze_idxs) + skipped
if isinstance(idx, slice):
if idx.start is None and idx.stop is None and idx.step is None:
is_used_slice_whole = True
continue
axes.append(axis)
step = 1 if idx.step is None else idx.step
steps.append(step)
if step < 0:
starts.append(
np.iinfo(np.int64).max if idx.start is None else idx.start)
ends.append(
np.iinfo(np.int64).min if idx.stop is None else idx.stop)
else:
starts.append(0 if idx.start is None else idx.start)
ends.append(
np.iinfo(np.int64).max if idx.stop is None else idx.stop)
elif isinstance(idx, int):
axes.append(axis)
steps.append(1)
if idx == -1:
starts.append(idx)
ends.append(np.iinfo(np.int64).max)
else:
starts.append(idx)
ends.append(idx+1)
squeeze_idxs.append(axis)
elif isinstance(idx, np.ndarray) and idx.ndim == 0:
scalar_idx = idx.item()
axes.append(axis)
starts.append(scalar_idx)
ends.append(scalar_idx+1)
steps.append(1)
squeeze_idxs.append(axis)
elif idx is None:
unsqueeze_idxs.append(i - len(squeeze_idxs) + skipped)
elif idx is Ellipsis:
            # Calculate the number of remaining slices excluding None.
            # GetItem does not allow multiple Ellipsis, so any later
            # Ellipsis count is ignored.
rest_slice_len = len(
[idx_ for idx_ in func.slices[i+1:] if idx_ is not None])
assert skipped == 0
skipped = len(x.shape) - axis - rest_slice_len - 1
elif isinstance(idx, (list,) + chainer.get_array_types()):
if prev_gathered_axis >= 0:
if (i - 1) != prev_gathered_axis:
raise ValueError(
                        'ONNX-Chainer does not support non-consecutive '
'multiple advanced indexing')
if is_used_slice_whole:
raise ValueError(
                    'ONNX-Chainer does not support whole indexing (`[:]`) '
'in front of multiple advanced indexing')
if unsqueeze_idxs:
raise ValueError(
'ONNX-Chainer does not support new axis in front of '
'multiple advanced indexing')
# multiple advanced index, convert to GatherND
idx_array = _to_ndarray(idx)
base_idx = gather_idx if gather_nd_idx is None else\
gather_nd_idx
gather_nd_idx = np.vstack((base_idx, idx_array))
prev_gathered_axis = i
else:
            # Convert to Gather; if the next index is also an array-like,
            # this will be changed to GatherND.
gather_axis = axis - len(squeeze_idxs) + len(unsqueeze_idxs)
gather_idx = _to_ndarray(idx)
prev_gathered_axis = i
else:
raise ValueError(
'GetItem with type {} cannot handle in ONNX Slice, so that '
'ONNX-Chainer does not accept the type'.format(type(idx)))
gb = onnx_helper.GraphBuilder()
slice_output = input_names
if axes:
output = get_slice_node(
gb, opset_version, context, slice_output, axes, starts, ends,
steps)
slice_output = [output]
if squeeze_idxs:
output = gb.op('Squeeze', slice_output, axes=squeeze_idxs)
slice_output = [output]
if unsqueeze_idxs:
output = gb.op('Unsqueeze', slice_output, axes=unsqueeze_idxs)
slice_output = [output]
if gather_nd_idx is not None:
if opset_version < 11:
raise ValueError(
'ONNX-Chainer supports multiple advanced indexing from opset11'
', opset{} is not supported'.format(opset_version))
gather_nd_idx_name = context.add_const(gather_nd_idx.T, 'indices')
slice_output.append(gather_nd_idx_name)
gb.op('GatherND', slice_output)
elif gather_idx is not None:
gather_idx_name = context.add_const(gather_idx, 'indices')
slice_output.append(gather_idx_name)
gb.op('Gather', slice_output, axis=gather_axis)
return gb.nodes(output_names=output_names)
@support((9, 11))
def convert_SelectItem(func, opset_version, input_names, output_names,
context):
gb = onnx_helper.GraphBuilder()
if opset_version >= 11:
t = gb.op('Unsqueeze', [input_names[1]], axes=[1])
out = gb.op('GatherElements', [input_names[0], t], axis=1)
gb.op('Squeeze', [out], axes=[1])
else:
data, target_idxs = input_names
target_idxs = gb.op('Cast', [target_idxs],
to=NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')])
n_rows = gb.op('Shape', [target_idxs])
        # This is equivalent to using Range.
one_1 = onnx.helper.make_tensor(
'one_1', onnx.TensorProto.FLOAT, [1], [1])
ones = gb.op('ConstantOfShape', [n_rows], value=one_1)
row_idxs = gb.op('Squeeze', [gb.op('NonZero', [ones])])
data_shape = gb.op('Shape', [data])
one_2 = context.add_const(np.array([1]), 'one_2')
n_cols = gb.op('Gather', [data_shape, one_2], axis=0)
data = gb.op('Squeeze', [gb.op('Flatten', [data], axis=2)])
target_idxs = gb.op(
'Add', [target_idxs, gb.op('Mul', [row_idxs, n_cols])])
gb.op('Gather', [data, target_idxs], axis=0)
return gb.nodes(output_names)
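# A NumPy sketch of the pre-opset-11 path above (illustrative only): the
# matrix is flattened and each target index is offset by row * n_cols,
# which is equivalent to ``data[np.arange(n_rows), t]``.
def _select_item_sketch(data, t):
    n_rows, n_cols = data.shape
    flat_idx = t.astype(np.int64) + np.arange(n_rows) * n_cols
    return data.reshape(-1)[flat_idx]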
@support((1, 2, 11))
def convert_Pad(func, opset_version, input_names, output_names, context):
if func.mode not in ['constant', 'reflect', 'edge']:
raise ValueError(
'{} mode is not supported in ONNX\'s Pad operation'.format(
func.mode))
pad_begin = []
pad_end = []
pad_bw = func.pad_bw
if pad_bw.ndim == 1:
pad_bw = np.tile(pad_bw, (len(func.inputs[0].shape), 1))
for pp in pad_bw.tolist():
pad_begin.append(pp[0])
pad_end.append(pp[1])
pad = pad_begin + pad_end
constant_value = func.keywords.get('constant_values', None)
if constant_value is not None:
# 'constant_values' only accepts int or array-like on Chainer
if not isinstance(constant_value, int) and len(constant_value) > 1:
raise ValueError(
'ONNX doesn\'t support multiple constant values for Pad '
'operation')
elif not isinstance(constant_value, int):
constant_value = float(constant_value[0])
else:
constant_value = float(constant_value)
if opset_version == 1:
kwargs = {
'mode': func.mode,
'paddings': pad,
}
if constant_value is not None:
kwargs['value'] = constant_value
elif opset_version == 2:
kwargs = {
'mode': func.mode,
'pads': pad,
}
if constant_value is not None:
kwargs['value'] = constant_value
elif opset_version == 11:
pads_name = context.add_const(np.array(pad, dtype=np.int64), 'pads')
input_names.append(pads_name)
if constant_value is not None:
constant_value_name = context.add_const(
np.array(constant_value, dtype=np.float32), 'constant_value')
input_names.append(constant_value_name)
kwargs = {'mode': func.mode}
return onnx_helper.make_node('Pad', input_names, output_names, **kwargs),
@support((9, 11))
def convert_Permutate(func, opset_version, input_names, output_names, context):
gb = onnx_helper.GraphBuilder()
indices_name = context.get_name(func.indices)
if func.inv:
empty = context.add_const(
np.zeros(dtype=np.int64, shape=func.indices.shape), 'empty')
r = context.add_const(np.arange(len(func.indices), dtype=np.int64),
'range')
op = 'ScatterElements' if opset_version == 11 else 'Scatter'
indices_name = gb.op(op, [empty, indices_name, r])
input_names.append(indices_name)
gb.op_output_named('Gather', input_names, output_names, axis=func.axis)
return gb.nodes()
@support((1, 5))
def convert_Reshape(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Reshape', input_names, output_names,
shape=func.shape
),
elif opset_version == 5:
if hasattr(func, 'shape'):
            # if the function has a shape parameter, the shape is not dynamic
assert len(input_names) == 1
shape_name = context.add_const(
np.asarray(list(func.shape), dtype=np.int64), 'shape')
input_names.append(shape_name)
else:
if len(input_names) != 2:
raise ValueError('shape must be set as parameter or 2nd input')
return onnx_helper.make_node(
'Reshape', input_names, output_names,
),
def convert_Space2Depth(
func, opset_version, input_names, output_names, context):
return onnx_helper.make_node(
'SpaceToDepth', input_names, output_names,
blocksize=func.r
),
@support((1, 2))
def convert_SplitAxis(func, opset_version, input_names, output_names, context):
if func.indices is not None:
indices_or_sections = func.indices
else:
indices_or_sections = func.sections
total = func.inputs[0].shape[func.axis]
if hasattr(indices_or_sections, '__iter__'):
split = []
prev_i = 0
for i in indices_or_sections:
split.append(i - prev_i)
prev_i = i
split.append(total - prev_i)
else:
length = total // indices_or_sections
split = [length for _ in range(indices_or_sections)]
assert len(output_names) == len(split)
if opset_version == 1:
return onnx_helper.make_node(
'Split', input_names, output_names,
axis=func.axis,
split=split
),
elif opset_version == 2:
return onnx_helper.make_node(
'Split', input_names, output_names,
axis=func.axis,
split=split
),
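# Worked example for the split-size computation above (illustrative only):
# splitting an axis of length 9 at indices [2, 5] yields Split sizes
# [2, 3, 4], while sections=3 yields [3, 3, 3].
def _split_sizes_sketch(total, indices_or_sections):
    if hasattr(indices_or_sections, '__iter__'):
        starts = [0] + list(indices_or_sections)
        stops = list(indices_or_sections) + [total]
        return [e - b for b, e in zip(starts, stops)]
    return [total // indices_or_sections] * indices_or_sections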
def convert_Squeeze(func, opset_version, input_names, output_names, context):
if func.axis is None:
axis = []
for i, s in enumerate(func.inputs[0].shape):
if s == 1:
axis.append(i)
else:
axis = func.axis
return onnx_helper.make_node(
'Squeeze', input_names, output_names,
axes=axis
),
def convert_Swapaxes(func, opset_version, input_names, output_names, context):
perm = list(range(len(func.inputs[0].shape)))
perm[func.axis1], perm[func.axis2] = perm[func.axis2], perm[func.axis1]
return onnx_helper.make_node(
'Transpose', input_names, output_names, perm=perm
),
@support((1, 6))
def convert_Tile(func, opset_version, input_names, output_names, context):
# Add tiles and axis to graph
if isinstance(func.reps, int):
func.reps = [func.reps]
tiles_name = context.add_const(
np.asarray(func.reps, dtype=np.int64), 'tiles')
input_names.append(tiles_name)
    # In opset version 1, axis also has to be given
if opset_version == 1:
axis_name = context.add_const(
np.array([i for i, _ in enumerate(func.reps)], dtype=np.float32),
'axis')
input_names.append(axis_name)
return onnx_helper.make_node('Tile', input_names, output_names),
def convert_Transpose(func, opset_version, input_names, output_names, context):
if func.axes is None:
node = onnx_helper.make_node('Transpose', input_names, output_names)
else:
node = onnx_helper.make_node(
'Transpose', input_names, output_names,
perm=func.axes
)
return node,
def convert_ExpandDims(
func, opset_version, input_names, output_names, context):
axis = func.axis
if axis < 0:
axis = len(func.inputs[0].shape) + 1 + axis
return onnx_helper.make_node(
'Unsqueeze', input_names, output_names, axes=[axis]),
@support((9,))
def convert_Where(func, opset_version, input_names, output_names, context):
input_names.insert(0, context.get_name(func.condition))
return onnx_helper.make_node('Where', input_names, output_names),
@support((7, 9, 10, 11))
def convert_Repeat(func, opset_version, input_names, output_names, context):
repeats = func.repeats
if len(repeats) > 1:
raise NotImplementedError(
'ONNX-Chainer currently does not support elementwise repeat')
gb = onnx_helper.GraphBuilder()
inputs = list(input_names)
axis = func.axis
if axis is None:
shape_name = context.add_const(np.array([-1], dtype=np.int64), 'shape')
input_names.append(shape_name)
inputs = [gb.op('Reshape', input_names)]
scales = [float(repeats[0])]
else:
scales = [1.0] * func.inputs[0].data.ndim
scales[axis] = float(repeats[0])
if opset_version == 7:
gb.op_output_named('Upsample', inputs, output_names, scales=scales)
return gb.nodes()
scales_name = context.add_const(
np.array(scales, dtype=np.float32), 'scales')
if opset_version in [9, 10]:
inputs.append(scales_name)
op = 'Upsample' if opset_version == 9 else 'Resize'
gb.op_output_named(op, inputs, output_names)
return gb.nodes()
if opset_version == 11:
roi = context.add_const(np.array([]), 'roi')
inputs.extend([roi, scales_name])
gb.op_output_named('Resize', inputs, output_names)
return gb.nodes()
@support((7, 9, 10, 11))
def convert_ResizeImages(
func, opset_version, input_names, output_names, context):
warnings.warn(
        '`resize_images` is mapped to the `Upsample` ONNX op with bilinear '
'interpolation. '
'Behavior of bilinear interpolation differs from each implementation. '
'See the issue https://github.com/chainer/onnx-chainer/issues/147 '
'for details.',
UserWarning)
outsize = (func.out_H, func.out_W)
h, w = func.inputs[0].shape[2:]
# Compute scaling factor.
    # NOTE(syoyo): Despite its name, the `Upsample` ONNX op will downsample
    # images when the scale value is less than 1.0
scales = [1.0, 1.0, float(outsize[0]) / float(h),
float(outsize[1]) / float(w)]
if (scales[2] < 1.0e-8) and (scales[3] < 1.0e-8):
raise ValueError(
'scaling factor is too small or zero. scales for h = {}, scales '
'for w = {}'.format(scales[2], scales[3]))
# resize_images in Chainer only supports bilinear interpolation
# Actually this will be mapped to 'bilinear' in onnxruntime
mode = 'linear'
if opset_version == 7:
return onnx_helper.make_node('Upsample', input_names, output_names,
scales=scales, mode=mode),
scales_name = context.add_const(
np.array(scales, dtype=np.float32), 'scales')
if opset_version in [9, 10]:
input_names.append(scales_name)
op = 'Upsample' if opset_version == 9 else 'Resize'
return onnx_helper.make_node(op, input_names, output_names,
mode=mode),
if opset_version == 11:
roi_name = context.add_const(np.array([]), 'roi')
input_names.extend([roi_name, scales_name])
return onnx_helper.make_node(
'Resize', input_names, output_names, mode=mode),
def convert_Stack(func, opset_version, input_names, output_names, context):
gb = onnx_helper.GraphBuilder()
axis = func.axis
if axis < 0:
axis = len(func.inputs[0].shape) + 1 + axis
    # To use the Concat op, unsqueeze every input to add a new axis
inputs = [gb.op('Unsqueeze', [name], axes=[axis]) for name in input_names]
gb.op_output_named('Concat', inputs, output_names, axis=axis)
return gb.nodes()
def convert_Hstack(func, opset_version, input_names, output_names, context):
gb = onnx_helper.GraphBuilder()
input0_ndim = len(func.inputs[0].shape)
inputs = input_names
axis = 1
if input0_ndim == 0:
inputs = [gb.op('Unsqueeze', [name], axes=[0]) for name in input_names]
axis = 0
elif input0_ndim == 1:
axis = 0
gb.op_output_named('Concat', inputs, output_names, axis=axis)
return gb.nodes()
def convert_Vstack(func, opset_version, input_names, output_names, context):
gb = onnx_helper.GraphBuilder()
input0_ndim = len(func.inputs[0].shape)
inputs = input_names
if input0_ndim == 0:
inputs = [gb.op('Unsqueeze', [name], axes=[0, 1]) for
name in input_names]
elif input0_ndim == 1:
inputs = [gb.op('Unsqueeze', [name], axes=[0]) for name in input_names]
gb.op_output_named('Concat', inputs, output_names, axis=0)
return gb.nodes()
def convert_Dstack(func, opset_version, input_names, output_names, context):
gb = onnx_helper.GraphBuilder()
input0_ndim = len(func.inputs[0].shape)
inputs = input_names
if input0_ndim == 0:
inputs = [gb.op('Unsqueeze', [name], axes=[0, 1, 2]) for
name in input_names]
elif input0_ndim == 1:
inputs = [gb.op('Unsqueeze', [name], axes=[0, 2]) for
name in input_names]
elif input0_ndim == 2:
inputs = [gb.op('Unsqueeze', [name], axes=[2]) for name in input_names]
gb.op_output_named('Concat', inputs, output_names, axis=2)
return gb.nodes()
def convert_Separate(func, opset_version, input_names, output_names, context):
gb = onnx_helper.GraphBuilder()
split_outs = gb.op(
'Split', input_names, num_outputs=len(output_names), axis=func.axis)
if len(output_names) == 1:
split_outs = [split_outs]
for i, node_name in enumerate(split_outs):
gb.op_output_named(
'Squeeze', [node_name], [output_names[i]], axes=[func.axis])
return gb.nodes()
def convert_Shape(func, opset_version, input_names, output_names, context):
return onnx_helper.make_node('Shape', input_names, output_names),
def convert_Moveaxis(func, opset_version, input_names, output_names, context):
ndim = len(func.inputs[0].shape)
source = [a % ndim for a in func.source]
destination = [a % ndim for a in func.destination]
order = [n for n in range(ndim) if n not in source]
for dest, src in sorted(zip(destination, source)):
order.insert(dest, src)
node = onnx_helper.make_node('Transpose', input_names, output_names,
perm=order)
return node,
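# Worked example of the permutation logic above (illustrative only, not
# used by the exporter): moving axis 0 of a 3-D array to position 2 gives
# perm=[1, 2, 0], the same permutation NumPy's moveaxis applies.
def _moveaxis_perm_example():
    x = np.arange(24).reshape(2, 3, 4)
    perm = [1, 2, 0]
    assert np.array_equal(np.transpose(x, perm), np.moveaxis(x, 0, 2))
    return perm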
def convert_Rollaxis(func, opset_version, input_names, output_names, context):
ndim = len(func.inputs[0].shape)
order = list(range(ndim))
order.remove(func.axis)
order.insert(func.start, func.axis)
node = onnx_helper.make_node('Transpose', input_names, output_names,
perm=order)
return node,
def convert_TransposeSequence(
func, opset_version, input_names, output_names, context):
if any(x.shape != func.inputs[0].shape for x in func.inputs):
raise ValueError(
'ONNX-Chainer can convert TransposeSequence only when all '
'inputs have same shape')
gb = onnx_helper.GraphBuilder()
n = func.inputs[0].shape[0]
concat_out = gb.op(
'Concat',
[gb.op('Unsqueeze', [name], axes=[0]) for name in input_names],
axis=0)
perm = list(range(len(func.inputs[0].shape) + 1))
perm[0], perm[1] = perm[1], perm[0]
transpose_out = gb.op('Transpose', [concat_out], perm=perm)
split_outs = gb.op('Split', [transpose_out], axis=0, num_outputs=n)
if n == 1:
split_outs = [split_outs]
for i, name in enumerate(split_outs):
gb.op_output_named('Squeeze', [name], [output_names[i]], axes=[0])
return gb.nodes()
| 23,717
| 34.085799
| 79
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/connection.py
|
import numpy as np
from onnx_chainer.functions.opset_version import support
from onnx_chainer import onnx_helper
def convert_Convolution2DFunction(
func, opset_version, input_names, output_names, context):
if hasattr(func, 'dy') and hasattr(func, 'dx'):
node = onnx_helper.make_node(
'Conv', input_names, output_names,
dilations=(func.dy, func.dx),
kernel_shape=func.inputs[1].shape[2:],
# pads: [x1_begin, x2_begin...x1_end, x2_end,...]
pads=(func.ph, func.pw, func.ph, func.pw),
strides=(func.sy, func.sx),
group=func.groups,
)
else:
node = onnx_helper.make_node(
'Conv', input_names, output_names,
dilations=(1, 1),
kernel_shape=func.inputs[1].shape[2:],
pads=(func.ph, func.pw, func.ph, func.pw),
strides=(func.sy, func.sx),
group=func.groups,
)
return node,
def convert_ConvolutionND(
func, opset_version, input_names, output_names, context):
pad = []
x_ndim = len(func.inputs[0].shape)
w_ndim = len(func.inputs[1].shape)
for _ in range(x_ndim - w_ndim):
pad.append(0)
for p in func.pad:
pad.append(p)
pad = pad * 2
return onnx_helper.make_node(
'Conv', input_names, output_names,
kernel_shape=func.inputs[1].shape[2:],
pads=pad,
strides=func.stride,
group=func.groups,
),
def convert_Deconvolution2DFunction(
func, opset_version, input_names, output_names, context):
return onnx_helper.make_node(
'ConvTranspose', input_names, output_names,
kernel_shape=func.inputs[1].shape[2:],
output_shape=(func.outh, func.outw),
# pads: [x1_begin, x2_begin...x1_end, x2_end,...]
pads=(func.ph, func.pw, func.ph, func.pw),
strides=(func.sy, func.sx),
group=func.groups,
),
def convert_DeconvolutionND(
func, opset_version, input_names, output_names, context):
pad = []
x_ndim = len(func.inputs[0].shape)
w_ndim = len(func.inputs[1].shape)
for _ in range(x_ndim - w_ndim):
pad.append(0)
for p in func.pad:
pad.append(p)
pad = pad * 2
return onnx_helper.make_node(
'ConvTranspose', input_names, output_names,
kernel_shape=func.inputs[1].shape[2:],
output_shape=func.outs,
pads=pad,
strides=func.stride,
group=func.groups,
),
def convert_EmbedIDFunction(
func, opset_version, input_names, output_names, context):
x_index_name, W_name = input_names
input_names = [W_name, x_index_name]
if func.ignore_label is not None:
raise ValueError(
'Current ONNX doesn\'t support ignore_label for EmbedID.')
return onnx_helper.make_node(
'Gather', input_names, output_names, axis=0),
@support((1, 6, 7))
def convert_LinearFunction(
func, opset_version, input_names, output_names, context):
    # When the function does not have a bias, add a zero bias for Gemm
if len(func.inputs) == 2:
bias_dim = func.inputs[1].shape[0]
bias = np.zeros((bias_dim,), dtype=func.inputs[0].dtype)
bias_name = context.add_param(bias, 'bias')
input_names.append(bias_name)
if opset_version == 1 or opset_version == 6:
return onnx_helper.make_node(
'Gemm', input_names, output_names,
alpha=1.0, beta=1.0, broadcast=1, transA=0, transB=1),
elif opset_version == 7:
return onnx_helper.make_node(
'Gemm', input_names, output_names,
alpha=1.0, beta=1.0, transA=0, transB=1),
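# A NumPy sketch of what the emitted Gemm node computes (illustrative
# only): with transA=0 and transB=1, Gemm(x, W, b) is x.dot(W.T) + b; a
# zero bias is appended above when Chainer's linear function has none.
def _linear_gemm_sketch(x, W, b=None):
    if b is None:
        b = np.zeros((W.shape[0],), dtype=x.dtype)
    return x.dot(W.T) + b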
| 3,652
| 30.491379
| 70
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/loss.py
|
import chainer
import numpy as np
from onnx_chainer.functions.opset_version import support
from onnx_chainer import onnx_helper
@support((9,))
def convert_SoftmaxCrossEntropy(
func, opset_version, input_names, output_names, context):
# obtain input variable
if not isinstance(func, chainer.FunctionNode):
raise NotImplementedError(
'SoftmaxCrossEntropy is currently supported for Chainer>=6.0.0a1.')
x_var, t_var = func.get_retained_inputs()
if len(x_var.shape) != 2:
raise NotImplementedError(
'ONNX-Chainer currently handles SoftmaxCrossEntropy only when '
'the dimension of input variable x is exactly two.')
if np.any(t_var.array == func.ignore_label):
raise NotImplementedError(
'ONNX-Chainer currently handles SoftmaxCrossEntropy only when '
'ignore_label is not used in input variable t.')
if (not func.normalize) or (func.class_weight is not None) or\
(func.ignore_label != -1) or (func.reduce != 'mean'):
raise NotImplementedError(
'ONNX-Chainer currently handles SoftmaxCrossEntropy only when '
'argument parameters are default setting.')
# create intermediate values
gb = onnx_helper.GraphBuilder()
x, t = input_names
y_log = gb.op('LogSoftmax', [x])
depth = context.add_const(np.array([x_var.shape[1]], dtype=np.int32),
'depth')
zeroone = context.add_const(np.array([0, 1], dtype=x_var.dtype), 'zeroone')
th = gb.op('OneHot', [t, depth, zeroone])
s0 = gb.op('Mul', [y_log, th])
sn = gb.op('Neg', [s0])
sr = gb.op('ReduceSum', [sn], axes=[1], keepdims=0)
gb.op_output_named('ReduceMean', [sr], output_names, axes=[0], keepdims=0)
return gb.nodes()
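# A NumPy sketch of the graph built above (illustrative only): log-softmax
# over axis 1, picking the log-probability of each target class, then
# averaging the negated values over the batch.
def _softmax_cross_entropy_sketch(x, t):
    x = x - x.max(axis=1, keepdims=True)  # does not change the result
    y_log = x - np.log(np.exp(x).sum(axis=1, keepdims=True))
    picked = y_log[np.arange(len(t)), t]
    return -picked.mean()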
| 1,798
| 38.977778
| 79
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/opset_version.py
|
def support(opset_versions):
"""Detect lowest supported version of the target converter
    A simple wrapper for convert functions that detects the lowest
    supported opset version. For example, if the target ONNX operator was
    added in opset 6 and updated in opset 8, apply this decorator as below.
>>> @support((6, 8))
... def own_converter(func, opset_version, *args):
... print(opset_version)
>>>
>>> own_converter(None, 6)
6
>>> own_converter(None, 7)
6
>>> own_converter(None, 8)
8
>>> own_converter(None, 9)
8
>>> own_converter(None, 5)
RuntimeError: ONNX-Chainer cannot convert ...(snip)
Arguments:
opset_versions (tuple): Tuple of opset versions.
"""
def _wrapper(func):
def _func_with_lower_opset_version(*args, **kwargs):
if opset_versions is None:
return func(*args, **kwargs)
opset_version = args[1]
for opver in sorted(opset_versions, reverse=True):
if opver <= opset_version:
break
if opver > opset_version:
func_name = args[0].__class__.__name__
raise RuntimeError(
'ONNX-Chainer cannot convert `{}` of Chainer with ONNX '
'opset_version {}'.format(
func_name, opset_version))
opset_version = opver
return func(args[0], opset_version, *args[2:], **kwargs)
return _func_with_lower_opset_version
return _wrapper
| 1,571
| 33.173913
| 76
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/noise.py
|
import chainer
from onnx_chainer.functions.opset_version import support
from onnx_chainer import onnx_helper
@support((1, 6, 7))
def convert_Dropout(func, opset_version, input_names, output_names, context):
if opset_version == 1:
return onnx_helper.make_node(
'Dropout', input_names, output_names,
is_test=0 if chainer.config.train else 1,
ratio=func.dropout_ratio,
consumed_inputs=[1]
),
elif opset_version == 6:
return onnx_helper.make_node(
'Dropout', input_names, output_names,
is_test=0 if chainer.config.train else 1,
ratio=func.dropout_ratio,
),
elif opset_version == 7:
return onnx_helper.make_node(
'Dropout', input_names, output_names,
ratio=func.dropout_ratio,
),
| 846
| 30.37037
| 77
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/converter.py
|
class FunctionConverterParams(object):
def __init__(
self, func=None, opset_version=None, input_names=None,
output_names=None, context=None):
"""Wrapper of converter parameters
        The exporter sets these parameters as the target converter's
        arguments.
>>> def own_converter(params):
... # params is FunctionConverterParams
... # so enable to get each attributes:
... func_name = params.func.__class__.__name__
Arguments:
func (~chainer.FunctionNode): Target function.
opset_version (int): Target opset version.
input_names (list): List of input names.
            output_names (list): List of output names.
            context (~onnx_chainer.context.Context): Context for exporting.
"""
self.func = func
self.opset_version = opset_version
self.input_names = input_names
self.output_names = output_names
self.context = context
class FunctionConverter(object):
def __init__(self, converter):
"""Wrapper of ONNX-Chainer converter
        The exporter passes arguments wrapped in ``FunctionConverterParams``,
        and this class breaks them down into individual arguments.
Arguments:
converter (function): The target converter function.
"""
self.converter = converter
def __call__(self, params):
func = params.func
opset_version = params.opset_version
input_names = params.input_names
output_names = params.output_names
context = params.context
return self.converter(
func, opset_version, input_names, output_names, context)
| 1,697
| 31.653846
| 74
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/__init__.py
|
from onnx_chainer.functions.activation import convert_ClippedReLU # NOQA
from onnx_chainer.functions.activation import convert_ELU # NOQA
from onnx_chainer.functions.activation import convert_HardSigmoid # NOQA
from onnx_chainer.functions.activation import convert_LeakyReLU # NOQA
from onnx_chainer.functions.activation import convert_LogSoftmax # NOQA
from onnx_chainer.functions.activation import convert_PReLUFunction # NOQA
from onnx_chainer.functions.activation import convert_ReLU # NOQA
from onnx_chainer.functions.activation import convert_Selu # NOQA
from onnx_chainer.functions.activation import convert_Sigmoid # NOQA
from onnx_chainer.functions.activation import convert_Softmax # NOQA
from onnx_chainer.functions.activation import convert_Softplus # NOQA
from onnx_chainer.functions.activation import convert_Tanh # NOQA
from onnx_chainer.functions.array import convert_Cast # NOQA
from onnx_chainer.functions.array import convert_Concat # NOQA
from onnx_chainer.functions.array import convert_Copy # NOQA
from onnx_chainer.functions.array import convert_Depth2Space # NOQA
from onnx_chainer.functions.array import convert_Dstack # NOQA
from onnx_chainer.functions.array import convert_ExpandDims # NOQA
from onnx_chainer.functions.array import convert_GetItem # NOQA
from onnx_chainer.functions.array import convert_Hstack # NOQA
from onnx_chainer.functions.array import convert_Moveaxis # NOQA
from onnx_chainer.functions.array import convert_Pad # NOQA
from onnx_chainer.functions.array import convert_Permutate # NOQA
from onnx_chainer.functions.array import convert_Repeat # NOQA
from onnx_chainer.functions.array import convert_Reshape # NOQA
from onnx_chainer.functions.array import convert_ResizeImages # NOQA
from onnx_chainer.functions.array import convert_Rollaxis # NOQA
from onnx_chainer.functions.array import convert_SelectItem # NOQA
from onnx_chainer.functions.array import convert_Separate # NOQA
from onnx_chainer.functions.array import convert_Shape # NOQA
from onnx_chainer.functions.array import convert_Space2Depth # NOQA
from onnx_chainer.functions.array import convert_SplitAxis # NOQA
from onnx_chainer.functions.array import convert_Squeeze # NOQA
from onnx_chainer.functions.array import convert_Stack # NOQA
from onnx_chainer.functions.array import convert_Swapaxes # NOQA
from onnx_chainer.functions.array import convert_Tile # NOQA
from onnx_chainer.functions.array import convert_Transpose # NOQA
from onnx_chainer.functions.array import convert_TransposeSequence # NOQA
from onnx_chainer.functions.array import convert_Vstack # NOQA
from onnx_chainer.functions.array import convert_Where # NOQA
from onnx_chainer.functions.connection import convert_Convolution2DFunction # NOQA
from onnx_chainer.functions.connection import convert_ConvolutionND # NOQA
from onnx_chainer.functions.connection import convert_Deconvolution2DFunction # NOQA
from onnx_chainer.functions.connection import convert_DeconvolutionND # NOQA
from onnx_chainer.functions.connection import convert_EmbedIDFunction # NOQA
from onnx_chainer.functions.connection import convert_LinearFunction # NOQA
from onnx_chainer.functions.loss import convert_SoftmaxCrossEntropy # NOQA
from onnx_chainer.functions.math import convert_Absolute # NOQA
from onnx_chainer.functions.math import convert_Add # NOQA
from onnx_chainer.functions.math import convert_AddConstant # NOQA
from onnx_chainer.functions.math import convert_Arccos # NOQA
from onnx_chainer.functions.math import convert_Arcsin # NOQA
from onnx_chainer.functions.math import convert_Arctan # NOQA
from onnx_chainer.functions.math import convert_ArgMax # NOQA
from onnx_chainer.functions.math import convert_ArgMin # NOQA
from onnx_chainer.functions.math import convert_BroadcastTo # NOQA
from onnx_chainer.functions.math import convert_Clip # NOQA
from onnx_chainer.functions.math import convert_Cos # NOQA
from onnx_chainer.functions.math import convert_Cosh # NOQA
from onnx_chainer.functions.math import convert_Div # NOQA
from onnx_chainer.functions.math import convert_DivFromConstant # NOQA
from onnx_chainer.functions.math import convert_Exp # NOQA
from onnx_chainer.functions.math import convert_Identity # NOQA
from onnx_chainer.functions.math import convert_LinearInterpolate # NOQA
from onnx_chainer.functions.math import convert_Log # NOQA
from onnx_chainer.functions.math import convert_LogSumExp # NOQA
from onnx_chainer.functions.math import convert_MatMul # NOQA
from onnx_chainer.functions.math import convert_Max # NOQA
from onnx_chainer.functions.math import convert_Maximum # NOQA
from onnx_chainer.functions.math import convert_Mean # NOQA
from onnx_chainer.functions.math import convert_Min # NOQA
from onnx_chainer.functions.math import convert_Minimum # NOQA
from onnx_chainer.functions.math import convert_Mul # NOQA
from onnx_chainer.functions.math import convert_MulConstant # NOQA
from onnx_chainer.functions.math import convert_Neg # NOQA
from onnx_chainer.functions.math import convert_PowConstVar # NOQA
from onnx_chainer.functions.math import convert_PowVarConst # NOQA
from onnx_chainer.functions.math import convert_PowVarVar # NOQA
from onnx_chainer.functions.math import convert_Prod # NOQA
from onnx_chainer.functions.math import convert_RsqrtGPU # NOQA
from onnx_chainer.functions.math import convert_sign # NOQA
from onnx_chainer.functions.math import convert_Sin # NOQA
from onnx_chainer.functions.math import convert_Sinh # NOQA
from onnx_chainer.functions.math import convert_Sqrt # NOQA
from onnx_chainer.functions.math import convert_Square # NOQA
from onnx_chainer.functions.math import convert_Sub # NOQA
from onnx_chainer.functions.math import convert_SubFromConstant # NOQA
from onnx_chainer.functions.math import convert_Sum # NOQA
from onnx_chainer.functions.math import convert_Tan # NOQA
from onnx_chainer.functions.noise import convert_Dropout # NOQA
from onnx_chainer.functions.normalization import convert_BatchNormalization # NOQA
from onnx_chainer.functions.normalization import convert_FixedBatchNormalization # NOQA
from onnx_chainer.functions.normalization import convert_GroupNormalization # NOQA
from onnx_chainer.functions.normalization import convert_LocalResponseNormalization # NOQA
from onnx_chainer.functions.normalization import convert_NormalizeL2 # NOQA
from onnx_chainer.functions.pooling import convert_AveragePooling2D # NOQA
from onnx_chainer.functions.pooling import convert_AveragePoolingND # NOQA
from onnx_chainer.functions.pooling import convert_MaxPooling2D # NOQA
from onnx_chainer.functions.pooling import convert_MaxPoolingND # NOQA
from onnx_chainer.functions.pooling import convert_ROIPooling2D # NOQA
from onnx_chainer.functions.pooling import convert_Unpooling2D # NOQA
from onnx_chainer.functions.rnn import convert_n_step_gru # NOQA
| 6,902
| 61.189189
| 91
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/rnn.py
|
import chainer
from onnx_chainer.functions.opset_version import support
from onnx_chainer import onnx_helper
@support((1, 6, 7))
def convert_n_step_gru(func, opset_version, input_names, output_names,
context):
n_layers, dropout_ratio, hx, ws, bs, xs = func.args
assert n_layers >= 1
hidden_size = hx.shape[2]
gb = onnx_helper.GraphBuilder()
hx_name = input_names[0]
offset = 1
ws_names = [[input_names[offset + i * 6 + j] for j in range(6)]
for i in range(n_layers)]
offset += 6 * n_layers
bs_names = [[input_names[offset + i * 6 + j] for j in range(6)]
for i in range(n_layers)]
offset += 6 * n_layers
xs_names = input_names[offset:]
split_outs = gb.op('Split', [hx_name], num_outputs=n_layers, axis=0)
if n_layers == 1:
split_outs = [split_outs]
    # Removing the layer dimension and adding num_directions cancel each
    # other out
hx_names = split_outs
hy_name, ys_name_list = \
func.reconstruct_return_value(output_names)
y_name = None
hy_names = []
for layer in range(n_layers):
if layer == 0:
# X; shape: (seq_length, batch_size, input_size)
x_name = gb.op(
'Concat',
[gb.op('Unsqueeze', [name], axes=[0]) for name in xs_names],
axis=0)
else:
if opset_version >= 7:
x_name = gb.op('Dropout', [y_name], ratio=dropout_ratio)
elif opset_version >= 6:
x_name = gb.op('Dropout', [y_name], ratio=dropout_ratio,
is_test=0 if chainer.config.train else 1)
else:
x_name = gb.op('Dropout', [y_name], ratio=dropout_ratio,
is_test=0 if chainer.config.train else 1,
consumed_inputs=[1])
            # remove the num_directions dimension
x_name = gb.op('Squeeze', [x_name], axes=[1])
w = ws_names[layer]
b = bs_names[layer]
# W[zrh]; shape: (num_directions, 3*hidden_size, input_size)
w_name = gb.op(
'Unsqueeze',
[gb.op('Concat', [w[1], w[0], w[2]], axis=0)],
axes=[0])
        # R[zrh]; shape: (num_directions, 3*hidden_size, hidden_size)
r_name = gb.op(
'Unsqueeze',
[gb.op('Concat', [w[4], w[3], w[5]], axis=0)],
axes=[0])
# Wb[zrh], Rb[zrh]; shape: (num_directions, 6*hidden_size)
b_name = gb.op(
'Unsqueeze',
[gb.op('Concat', [b[1], b[0], b[2], b[4], b[3], b[5]], axis=0)],
axes=[0])
# Y; shape: (seq_length, num_directions, batch_size, hidden_size)
# Y_h; shape: (num_directions, batch_size, hidden_size)
y_name, hy_name_ = gb.op(
'GRU',
(x_name, w_name, r_name, b_name, "", hx_names[layer]),
hidden_size=hidden_size,
linear_before_reset=1,
num_outputs=2)
hy_names.append(hy_name_)
split_outs = gb.op(
'Split',
        # remove the num_directions dimension
[gb.op('Squeeze', [y_name], axes=[1])],
num_outputs=len(ys_name_list), axis=0)
if len(ys_name_list) == 1:
split_outs = [split_outs]
for i, node_name in enumerate(split_outs):
        # remove the seq_length dimension
gb.op_output_named('Squeeze', [node_name], [ys_name_list[i]], axes=[0])
    # Removal of num_directions and the new dimension for concatenation
    # cancel each other.
gb.op_output_named('Concat', hy_names, [hy_name], axis=0)
return gb.nodes()
| 3,642
| 34.028846
| 79
|
py
|
chainer
|
chainer-master/onnx_chainer/functions/normalization.py
|
import sys
import chainer
import numpy as np
from onnx_chainer.functions.array import get_slice_node
from onnx_chainer.functions.opset_version import support
from onnx_chainer import onnx_helper
@support((1, 6, 7))
def convert_BatchNormalization(
func, opset_version, input_names, output_names, context):
is_fixed_bn = len(func.inputs) > 3
    # NOTE: even if `use_beta=False` or `use_gamma=False`, beta and gamma
    # are set in the inputs by RetainHook.
beta_param = func.inputs[2].get_variable_or_none()
gamma_param = func.inputs[1].get_variable_or_none()
namedlink = context.get_link(beta_param) or context.get_link(gamma_param)
if namedlink is not None:
prefix, link = namedlink
if is_fixed_bn:
mean = link.avg_mean
var = link.avg_var
else:
            # in train mode, avg_mean would be updated, so compute them from x
x = func.inputs[0].get_variable().array
mean = x.mean(axis=func.axis)
var = x.var(axis=func.axis)
else:
prefix = None
if is_fixed_bn:
mean = func.inputs[3].get_variable().array
var = func.inputs[4].get_variable().array
else:
x = func.inputs[0].get_variable().array
mean = x.mean(axis=func.axis)
var = x.var(axis=func.axis)
def add_param(v, suffix):
if prefix is None:
return context.add_param(v, suffix)
else:
return context.add_param(
v, '{}_{}'.format(prefix, suffix), use_original_name=True)
if is_fixed_bn:
if context.implicit_inputs.pop(input_names[3], None) is not None:
mean_name = add_param(mean, 'avg_mean')
input_names[3] = mean_name
if context.implicit_inputs.pop(input_names[4], None) is not None:
var_name = add_param(var, 'avg_var')
input_names[4] = var_name
else:
        mean_name = add_param(mean, 'avg_mean')
        var_name = add_param(var, 'avg_var')
        input_names.extend([mean_name, var_name])
momentum = getattr(func, 'decay', 0.)
    # TODO(disktnk): By the definition of ONNX's BatchNormalization
    # operator, it outputs one required output and four optional outputs.
    # This converter should make all 5 output values and return them.
if opset_version == 1:
return onnx_helper.make_node(
'BatchNormalization', input_names, output_names,
epsilon=func.eps,
momentum=momentum,
is_test=not chainer.config.train,
consumed_inputs=[False, False, False, True, True],
),
elif opset_version == 6:
return onnx_helper.make_node(
'BatchNormalization', input_names, output_names,
epsilon=func.eps,
momentum=momentum,
is_test=not chainer.config.train,
),
elif opset_version == 7:
return onnx_helper.make_node(
'BatchNormalization', input_names, output_names,
epsilon=func.eps,
momentum=momentum,
),
@support((1, 6, 7))
def convert_FixedBatchNormalization(
func, opset_version, input_names, output_names, context):
return convert_BatchNormalization(
func, opset_version, input_names, output_names, context)
@support((5, 10))
def convert_GroupNormalization(
func, opset_version, input_names, output_names, context):
    # drop opset < 5 to reduce the cost of supporting the old Reshape op
    # the calculation process follows
    # https://github.com/chainer/chainer/blob/v6.2.0/chainer/functions/normalization/group_normalization.py # NOQA
    # a dynamic batch size is supported, but the channel size is expected
    # to be fixed
group = context.add_const(np.array(func.groups, dtype=np.int64), 'group')
eps = context.add_const(np.array(func.eps, dtype=np.float32), 'eps')
channel_size = context.add_const(
np.array([func.inputs[0].shape[1]], dtype=np.int64), 'channel')
neg_one = context.add_const(np.array([-1], dtype=np.int64), 'neg_one')
gb = onnx_helper.GraphBuilder()
# make reduced input
original_shape = gb.op('Shape', [input_names[0]])
batch_size = get_slice_node(
gb, opset_version, context, [original_shape], [0], [0], [1], [1])
batched_group = gb.op('Mul', [batch_size, group])
reduce_shape = gb.op('Concat', [batched_group, neg_one], axis=0)
reduced_x = gb.op('Reshape', [input_names[0], reduce_shape])
# calculate mean, var and x_hat
mean = gb.op('Unsqueeze', [
gb.op('ReduceMean', [reduced_x], axes=[1], keepdims=0)], axes=[1])
x_hat = gb.op('Sub', [reduced_x, mean])
var = gb.op('Add', [
gb.op(
'ReduceMean', [gb.op('Mul', [x_hat, x_hat])], axes=[1],
keepdims=0),
eps])
inv_std = gb.op('Unsqueeze', [
gb.op('Reciprocal', [gb.op('Sqrt', [var])])], axes=[1])
x_hat_ = gb.op('Mul', [x_hat, inv_std])
# make out y
groupless_shape = gb.op(
'Concat', [batch_size, channel_size, neg_one], axis=0)
y_org = gb.op('Reshape', [x_hat_, groupless_shape])
# gamma/beta
gamma = gb.op('Unsqueeze', [input_names[1]], axes=[1])
beta = gb.op('Unsqueeze', [input_names[2]], axes=[1])
y_g = gb.op('Mul', [y_org, gamma])
y_b = gb.op('Add', [y_g, beta])
gb.op('Reshape', [y_b, original_shape])
return gb.nodes(output_names)
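# A NumPy sketch of the computation the node graph above expresses
# (illustrative only): normalize over channel groups, then scale and shift
# with gamma/beta.
def _group_normalization_sketch(x, gamma, beta, groups, eps=1e-5):
    n, c = x.shape[0], x.shape[1]
    reduced = x.reshape(n * groups, -1)
    mean = reduced.mean(axis=1, keepdims=True)
    var = ((reduced - mean) ** 2).mean(axis=1, keepdims=True) + eps
    x_hat = ((reduced - mean) / np.sqrt(var)).reshape(n, c, -1)
    y = x_hat * gamma[None, :, None] + beta[None, :, None]
    return y.reshape(x.shape)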
def convert_LocalResponseNormalization(
func, opset_version, input_names, output_names, context):
size = int(func.n)
return onnx_helper.make_node(
'LRN', input_names, output_names,
alpha=float(func.alpha) * size,
beta=float(func.beta),
bias=float(func.k),
size=size,
),
def convert_NormalizeL2(
func, opset_version, input_names, output_names, context):
if isinstance(func.axis, tuple) and len(func.axis) != 1:
raise ValueError(
            'Normalization along multiple axes ({}) is not supported in '
'the ONNX\'s LpNormalization operator.'.format(func.axis))
if abs(func.eps - 1e-5) > sys.float_info.epsilon:
        # default value of F.normalize eps is 1e-5
raise ValueError(
'\'eps\' is not supported in the ONNX\'s LpNormalization operator,'
' so that ONNX-Chainer does not accept custom values for \'eps\' '
'({})'.format(func.eps))
return onnx_helper.make_node(
'LpNormalization', input_names, output_names,
axis=int(func.axis[0]),
p=2,
),
| 6,532
| 35.294444
| 115
|
py
|
chainer
|
chainer-master/onnx_chainer/testing/test_onnxruntime.py
|
import glob
import os
import warnings
import numpy as np
import onnx
try:
import onnxruntime as rt
ONNXRUNTIME_AVAILABLE = True
except ImportError:
warnings.warn(
        'ONNXRuntime is not installed. Please install it to use '
        'the testing utility for ONNX-Chainer\'s converters.',
ImportWarning)
ONNXRUNTIME_AVAILABLE = False
def load_test_data(data_dir, input_names, output_names):
inout_values = []
for kind, names in [('input', input_names), ('output', output_names)]:
names = list(names)
values = {}
for pb in sorted(
glob.glob(os.path.join(data_dir, '{}_*.pb'.format(kind)))):
tensor = onnx.load_tensor(pb)
if tensor.name in names:
name = tensor.name
names.remove(name)
else:
name = names.pop(0)
values[name] = onnx.numpy_helper.to_array(tensor)
inout_values.append(values)
return tuple(inout_values)
def check_model_expect(test_path, input_names=None, rtol=1e-5, atol=1e-5):
if not ONNXRUNTIME_AVAILABLE:
raise ImportError('ONNX Runtime is not found on checking module.')
model_path = os.path.join(test_path, 'model.onnx')
with open(model_path, 'rb') as f:
onnx_model = onnx.load_model(f)
sess = rt.InferenceSession(onnx_model.SerializeToString())
rt_input_names = [value.name for value in sess.get_inputs()]
rt_output_names = [value.name for value in sess.get_outputs()]
# To detect unexpected inputs created by exporter, check input names
if input_names is not None:
assert list(sorted(input_names)) == list(sorted(rt_input_names))
test_data_sets = sorted([
p for p in os.listdir(test_path) if p.startswith('test_data_set_')])
for test_data in test_data_sets:
test_data_path = os.path.join(test_path, test_data)
assert os.path.isdir(test_data_path)
inputs, outputs = load_test_data(
test_data_path, rt_input_names, rt_output_names)
rt_out = sess.run(list(outputs.keys()), inputs)
for cy, my in zip(outputs.values(), rt_out):
np.testing.assert_allclose(cy, my, rtol=rtol, atol=atol)
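# Minimal usage sketch (illustration only; the paths below are
# hypothetical).  ``check_model_expect`` expects a directory produced by
# ``onnx_chainer.export_testcase``: a ``model.onnx`` next to one or more
# ``test_data_set_*`` directories holding ``input_*.pb`` / ``output_*.pb``
# tensors.
#
#     import onnx_chainer
#     onnx_chainer.export_testcase(model, args, 'out/my_testcase')
#     check_model_expect('out/my_testcase')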
| 2,228
| 34.380952
| 76
|
py
|
chainer
|
chainer-master/onnx_chainer/testing/test_mxnet.py
|
import collections
import os
import warnings
import chainer
import numpy as np
from onnx_chainer.testing.test_onnxruntime import load_test_data
try:
import mxnet
MXNET_AVAILABLE = True
except ImportError:
warnings.warn(
        'MXNet is not installed. Please install mxnet to use '
        'the testing utility for compatibility checking.',
ImportWarning)
MXNET_AVAILABLE = False
def check_model_expect(test_path, input_names=None, rtol=1e-5, atol=1e-5):
if not MXNET_AVAILABLE:
raise ImportError('MXNet is not found on checking module.')
model_path = os.path.join(test_path, 'model.onnx')
sym, arg, aux = mxnet.contrib.onnx.import_model(model_path)
mx_input_names = [graph_input for graph_input in sym.list_inputs()
if graph_input not in arg and graph_input not in aux]
if input_names is not None:
assert list(sorted(input_names)) == list(sorted(mx_input_names))
test_data_sets = sorted([
p for p in os.listdir(test_path) if p.startswith('test_data_set_')])
for test_data in test_data_sets:
test_data_path = os.path.join(test_path, test_data)
assert os.path.isdir(test_data_path)
inputs, outputs = load_test_data(
test_data_path, mx_input_names, sym.list_outputs())
data_shapes = [(name, array.shape) for name, array in inputs.items()]
mod = mxnet.mod.Module(
symbol=sym, data_names=mx_input_names, context=mxnet.cpu(),
label_names=None)
mod.bind(
for_training=chainer.config.train,
data_shapes=data_shapes, label_shapes=None)
mod.set_params(
arg_params=arg, aux_params=aux, allow_missing=True,
allow_extra=True)
Batch = collections.namedtuple('Batch', ['data'])
mx_input = [mxnet.nd.array(array) for array in inputs.values()]
mod.forward(Batch(mx_input))
mx_outputs = [y.asnumpy() for y in mod.get_outputs()]
for cy, my in zip(outputs.values(), mx_outputs):
np.testing.assert_allclose(cy, my, rtol=rtol, atol=atol)
| 2,113
| 36.087719
| 77
|
py
|
chainer
|
chainer-master/onnx_chainer/testing/input_generator.py
|
import numpy as np
def shaped_range(*shape, dtype=np.float32):
    r = np.arange(np.prod(shape))
    r = r.reshape(shape)
    return r.astype(dtype)
def _increasing_impl(*shape, dtype=np.float32, negative=True, bias=0):
r = shaped_range(*shape, dtype=dtype)
if negative:
r -= r.size // 2
if dtype in (np.float32, np.float64):
r = r * 0.5
r += bias
return r.astype(dtype)
def increasing(*shape, dtype=np.float32):
"""Returns a monotonically increasing ndarray for test inputs.
    The output will contain zero, negative numbers, and non-integer
    numbers for float dtypes. A test writer is supposed to consider
    this function first.
Example:
>>> onnx_chainer.testing.input_generator.increasing(3, 4)
array([[-3. , -2.5, -2. , -1.5],
[-1. , -0.5, 0. , 0.5],
[ 1. , 1.5, 2. , 2.5]], dtype=float32)
Args:
shape (tuple of int): The shape of the output array.
dtype (numpy.dtype): The dtype of the output array.
Returns:
numpy.ndarray
"""
return _increasing_impl(*shape, dtype=dtype)
def nonzero_increasing(*shape, dtype=np.float32, bias=1e-7):
"""Returns a monotonically increasing ndarray for test inputs.
Similar to `increasing` but contains no zeros. Expected to be used
for divisors.
Example:
>>> onnx_chainer.testing.input_generator.nonzero_increasing(3, 4)
array([[-3.0000000e+00, -2.5000000e+00, -1.9999999e+00, -1.4999999e+00],
[-9.9999988e-01, -4.9999991e-01, 1.0000000e-07, 5.0000012e-01],
[ 1.0000001e+00, 1.5000001e+00, 2.0000000e+00, 2.5000000e+00]],
dtype=float32)
Args:
shape (tuple of int): The shape of the output array.
dtype (numpy.dtype): The dtype of the output array.
bias (float): The bias to avoid zero.
Returns:
numpy.ndarray
"""
assert dtype in (np.float32, np.float64)
return _increasing_impl(*shape, dtype=dtype, bias=bias)
def positive_increasing(*shape, dtype=np.float32, bias=1e-7):
"""Returns a monotonically increasing ndarray for test inputs.
Similar to `increasing` but contains only positive numbers. Expected
to be used for `math.log`, `math.sqrt`, etc.
Example:
>>> onnx_chainer.testing.input_generator.positive_increasing(3, 4)
array([[1.0000000e-07, 5.0000012e-01, 1.0000001e+00, 1.5000001e+00],
[2.0000000e+00, 2.5000000e+00, 3.0000000e+00, 3.5000000e+00],
[4.0000000e+00, 4.5000000e+00, 5.0000000e+00, 5.5000000e+00]],
dtype=float32)
Args:
shape (tuple of int): The shape of the output array.
dtype (numpy.dtype): The dtype of the output array.
bias (float): The bias to avoid zero.
Returns:
numpy.ndarray
"""
return _increasing_impl(*shape, dtype=dtype, negative=False, bias=bias)
| 2,876
| 29.935484
| 77
|
py
|
chainer
|
chainer-master/onnx_chainer/testing/get_test_data_set.py
|
import os
import onnx_chainer
TEST_OUT_DIR = 'out'
def gen_test_data_set(model, args, name, opset_version, **kwargs):
model.xp.random.seed(42)
test_path = os.path.join(
TEST_OUT_DIR, 'opset{}'.format(opset_version), name)
onnx_chainer.export_testcase(
model, args, test_path, opset_version=opset_version, **kwargs)
return test_path
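# Usage sketch (illustration only; the model and input below are
# hypothetical):
#
#     import chainer.links as L
#     import numpy as np
#     test_path = gen_test_data_set(
#         L.Linear(5, 3), (np.zeros((1, 5), dtype=np.float32),),
#         'linear', opset_version=9)
#     # -> 'out/opset9/linear' containing model.onnx and test data sets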
| 369
| 22.125
| 70
|
py
|
chainer
|
chainer-master/onnx_chainer/testing/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/onnx_chainer/examples/resnet50/export.py
|
"""Example for exporting ResNet50 model to ONNX graph.
$ pwd
/path/to/onnx-chainer
$ python examples/resnet50/export.py -I target.jpg -O onnx_model
'model.onnx' will be output under 'onnx_model' directory.
"""
import argparse
import os
import chainer.cuda
import chainercv.links as C
from chainercv.transforms import center_crop
from chainercv.transforms import scale
from chainercv.utils import read_image
from onnx_chainer import export
from onnx_chainer import export_testcase
def export_onnx(input_image_path, output_path, gpu, only_output=True):
"""Export ResNet50 model to ONNX graph
'model.onnx' file will be exported under ``output_path``.
"""
model = C.ResNet50(pretrained_model='imagenet', arch='fb')
input_image = read_image(input_image_path)
input_image = scale(input_image, 256)
input_image = center_crop(input_image, (224, 224))
input_image -= model.mean
input_image = input_image[None, :]
if gpu >= 0:
model.to_gpu()
input_image = chainer.cuda.to_gpu(input_image)
if only_output:
os.makedirs(output_path, exist_ok=True)
name = os.path.join(output_path, 'model.onnx')
export(model, input_image, filename=name)
else:
        # an input and output given by Chainer will also be emitted
        # for use as a test dataset
export_testcase(model, input_image, output_path)
if __name__ == '__main__':
this_file_path = os.path.dirname(os.path.abspath(__file__))
default_image_path = os.path.normpath(
os.path.join(this_file_path, '..', 'images'))
default_input_path = os.path.join(default_image_path, 'cat.jpg')
default_output_path = os.path.join('out', 'test_resnet50')
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-G', type=int, default=-1)
parser.add_argument('--input-image', '-I', default=default_input_path)
parser.add_argument('--output', '-O', default=default_output_path)
parser.add_argument('--enable-value-check', '-T', action='store_true')
args = parser.parse_args()
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
export_onnx(
args.input_image, args.output, args.gpu, not args.enable_value_check)
if args.enable_value_check:
from onnx_chainer.testing.test_onnxruntime import check_model_expect # NOQA
check_model_expect(args.output)
| 2,402
| 32.84507
| 84
|
py
|
chainer
|
chainer-master/onnx_chainer/examples/yolov2tiny/export.py
|
"""Example for exporting YOLOv2 Tiny model to ONNX graph.
$ pwd
/path/to/onnx-chainer
$ python examples/yolov2tiny/export.py -I target.jpg -O onnx_model
'model.onnx' will be output under 'onnx_model' directory.
NOTE: The outputs require postprocessing to draw bounding boxes on target.jpg.
See ChainerCV's example of detection 'visualize_models.py'.
"""
import argparse
import os
import chainer.cuda
from chainercv.experimental.links import YOLOv2Tiny
from chainercv.utils import read_image
from onnx_chainer import export
from onnx_chainer import export_testcase
def export_onnx(input_image_path, output_path, gpu, only_output=True):
"""Export YOLOv2 Tiny model to ONNX graph
'model.onnx' file will be exported under ``output_path``.
"""
model = YOLOv2Tiny(pretrained_model='voc0712')
input_image = read_image(input_image_path)
input_image = input_image[None, :]
if gpu >= 0:
model.to_gpu()
input_image = chainer.cuda.to_gpu(input_image)
if only_output:
os.makedirs(output_path, exist_ok=True)
name = os.path.join(output_path, 'model.onnx')
export(
model, input_image, filename=name,
output_names=('locs', 'objs', 'confs'))
else:
        # an input and output given by Chainer will also be emitted
        # for use as a test dataset
export_testcase(
model, input_image, output_path,
output_names=('locs', 'objs', 'confs'))
if __name__ == '__main__':
this_file_path = os.path.dirname(os.path.abspath(__file__))
default_image_path = os.path.normpath(
os.path.join(this_file_path, '..', 'images'))
default_input_path = os.path.join(default_image_path, 'cat.jpg')
default_output_path = os.path.join('out', 'test_yolo2tiny')
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-G', type=int, default=-1)
parser.add_argument('--input-image', '-I', default=default_input_path)
parser.add_argument('--output', '-O', default=default_output_path)
parser.add_argument('--enable-value-check', '-T', action='store_true')
args = parser.parse_args()
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
export_onnx(
args.input_image, args.output, args.gpu, not args.enable_value_check)
if args.enable_value_check:
from onnx_chainer.testing.test_onnxruntime import check_model_expect # NOQA
check_model_expect(args.output, atol=1e-3)
| 2,494
| 33.178082
| 84
|
py
|
chainer
|
chainer-master/tests/conftest.py
|
import os
import subprocess
import sys
from chainer import testing
from chainer.testing import parameterized
_pairwise_parameterize = (
os.environ.get('CHAINER_TEST_PAIRWISE_PARAMETERIZATION', 'never'))
assert _pairwise_parameterize in ('never', 'always')
def _is_pip_installed():
try:
import pip # NOQA
return True
except ImportError:
return False
def _is_in_ci():
ci_name = os.environ.get('CHAINER_CI', '')
return ci_name != ''
def pytest_configure(config):
# Print installed packages
if _is_in_ci() and _is_pip_installed():
print("***** Installed packages *****", flush=True)
subprocess.check_call([sys.executable, '-m', 'pip', 'freeze', '--all'])
def pytest_collection(session):
# Perform pairwise testing.
# TODO(kataoka): This is a tentative fix. Discuss its public interface.
if _pairwise_parameterize == 'always':
pairwise_product_dict = parameterized._pairwise_product_dict
testing.product_dict = pairwise_product_dict
parameterized.product_dict = pairwise_product_dict
def pytest_collection_finish(session):
if _pairwise_parameterize == 'always':
product_dict = parameterized._product_dict_orig
testing.product_dict = product_dict
parameterized.product_dict = product_dict
| 1,329
| 26.708333
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_variable.py
|
import copy
import inspect
import platform
import re
import sys
import unittest
import warnings
import mock
import numpy as np
import pytest
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
import chainer.functions as F
from chainer import initializers
from chainer import testing
from chainer.testing import attr
import chainer.testing.backend
from chainer import variable
import chainerx
if chainerx.is_available():
import chainerx.testing
class Constant(chainer.Function):
def __init__(self, outputs):
self.__outputs = outputs
def forward_cpu(self, inputs):
return self.__outputs
def forward_gpu(self, inputs):
return tuple(map(cuda.to_gpu, self.__outputs))
def backward_cpu(self, inputs, grad_outputs):
return tuple(map(np.zeros_like, inputs))
def backward_gpu(self, inputs, grad_outputs):
return tuple(map(cuda.cupy.zeros_like, inputs))
def constant(xs, value):
return Constant(value)(*xs)
def get_array(xp, arr):
if xp is np:
return arr
if xp is cuda.cupy:
return cuda.to_gpu(arr)
if xp is chainerx:
return chainerx.array(arr)
assert False
def get_variable(xp, arr):
return chainer.Variable(get_array(xp, arr))
class MulAdd(chainer.FunctionNode):
def forward(self, inputs):
self.retain_inputs((0, 1))
a, b, c = inputs
return a * b + c,
def backward_accumulate(self, target_input_indexes, grad_outputs,
grad_inputs):
a, b = self.get_retained_inputs()
g, = grad_outputs
ret = []
for i, g_in in zip(target_input_indexes, grad_inputs):
if i == 0:
ret.append(
g * b
if g_in is None else
muladd(g, b, g_in)
)
elif i == 1:
ret.append(
a * g
if g_in is None else
muladd(a, g, g_in)
)
elif i == 2:
ret.append(
g
if g_in is None else
g + g_in
)
else:
assert False
return tuple(ret)
def muladd(a, b, c):
return MulAdd().apply((a, b, c))[0]
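# Illustrative note (not part of the original test): for y = a * b + c the
# chain rule gives
#     dL/da = g * b,   dL/db = a * g,   dL/dc = g,
# where g is the incoming gradient.  ``backward_accumulate`` above returns
# exactly these terms, and when an already-accumulated gradient ``g_in`` is
# present it fuses the addition into another ``muladd`` call.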
_numpy_device = backend.CpuDevice()
_nonchainerx_backend_params = [
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
]
_chainerx_backend_params = [
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
_backend_params = [
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
@testing.parameterize(*(
testing.product({
'var_mapping': [(0, 1, 2)], # distinct
'in0_isvar_hasgrad': [(False, False), (True, False), (True, True)],
'in1_isvar_hasgrad': [(False, False), (True, False), (True, True)],
'in2_isvar_hasgrad': [(False, False), (True, False), (True, True)],
}) + testing.product({
'var_mapping': [
(0, 0, 1), # a == b != c
(0, 1, 0),
(0, 1, 1),
],
'in0_isvar_hasgrad': [(False, False), (True, False), (True, True)],
'in1_isvar_hasgrad': [(False, False), (True, False), (True, True)],
}) + testing.product({
'var_mapping': [(0, 0, 0)], # a == b == c
'in0_isvar_hasgrad': [(False, False), (True, False), (True, True)],
})
))
class TestBackwardAccumulate(unittest.TestCase):
shape = 3,
def setUp(self):
n = max(self.var_mapping) + 1
self.inputs_isvar_hasgrad = [
getattr(self, 'in{}_isvar_hasgrad'.format(i))
for i in range(n)]
shape = self.shape
self.inputs_data = [
np.random.randn(*shape).astype(np.float32)
for _ in range(n)]
self.inputs_grad = [
np.random.randn(*shape).astype(np.float32) if hasgrad else None
for _, hasgrad in self.inputs_isvar_hasgrad]
self.gy = np.random.randn(*shape).astype(np.float32)
def _get_inputs(self):
copied_data = [x.copy() for x in self.inputs_data]
copied_grad = [
            None if g is None else g.copy() for g in self.inputs_grad]
return [
chainer.Variable(x, grad=g) if isvar else x
for x, g, (isvar, _) in zip(
copied_data,
copied_grad,
self.inputs_isvar_hasgrad
)
]
def check_backward_accumulate(self, xp):
inputs = self._get_inputs()
a, b, c = [inputs[i] for i in self.var_mapping]
y = muladd(a, b, c)
y.grad = self.gy
y.backward()
inputs2 = self._get_inputs()
a2, b2, c2 = [inputs2[i] for i in self.var_mapping]
y2 = chainer.as_variable(a2 * b2 + c2)
y2.grad = self.gy
y2.backward()
tol = {'atol': 1e-4, 'rtol': 1e-4}
for x, x2, (isvar, _) in zip(
inputs, inputs2, self.inputs_isvar_hasgrad):
if isvar:
xp.testing.assert_allclose(x.grad, x2.grad, **tol)
def test_backward_accumulate_cpu(self):
self.check_backward_accumulate(np)
def _to_gpu(self):
self.inputs_data = [cuda.to_gpu(x) for x in self.inputs_data]
self.inputs_grad = [
None if g is None else cuda.to_gpu(g)
for g in self.inputs_grad]
self.gy = cuda.to_gpu(self.gy)
@attr.gpu
def test_backward_accumulate_gpu(self):
self._to_gpu()
self.check_backward_accumulate(cuda.cupy)
class TestVariableNode(unittest.TestCase):
def test_grad(self):
with pytest.raises(ValueError):
variable.VariableNode(chainer.Variable(), '', grad=None)
@testing.parameterize(
{'x_shape': (10,), 'c_shape': (2, 5), 'label': '(2, 5), float32'},
{'x_shape': (), 'c_shape': (1,), 'label': '(1), float32'},
)
class TestVariable(unittest.TestCase):
def setUp(self):
self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
self.a = np.random.uniform(0.1, 10, self.x_shape).astype(np.float32)
self.size = int(np.prod(self.x_shape))
self.c = np.arange(self.size).reshape(self.c_shape).astype(np.float32)
def test_numpy_init(self):
a = np.asarray(self.x)
x = chainer.Variable(a)
np.testing.assert_array_equal(x.array, a)
assert x._has_chainerx_array is False
assert isinstance(x.node, chainer.variable.VariableNode)
def test_numpy_init_unchecked(self):
a = np.asarray(self.x)
x = chainer.Variable._init_unchecked(a)
np.testing.assert_array_equal(x.array, a)
assert x._has_chainerx_array is False
assert isinstance(x.node, chainer.variable.VariableNode)
def test_numpy_init_unchecked_explicit(self):
a = np.asarray(self.x)
x = chainer.Variable._init_unchecked(a, is_chainerx_array=False)
np.testing.assert_array_equal(x.array, a)
assert x._has_chainerx_array is False
assert isinstance(x.node, chainer.variable.VariableNode)
@attr.chainerx
def test_chainerx_init(self):
a = chainerx.asarray(self.x)
x = chainer.Variable(a)
chainerx.testing.assert_array_equal(x.array, a)
assert x._has_chainerx_array is True
with pytest.raises(RuntimeError):
x.node
@attr.chainerx
def test_chainerx_init_unchecked(self):
a = chainerx.asarray(self.x)
x = chainer.Variable._init_unchecked(a)
chainerx.testing.assert_array_equal(x.array, a)
assert x._has_chainerx_array is True
with pytest.raises(RuntimeError):
x.node
@attr.chainerx
def test_chainerx_init_unchecked_explicit(self):
a = chainerx.asarray(self.x)
x = chainer.Variable._init_unchecked(a, is_chainerx_array=True)
chainerx.testing.assert_array_equal(x.array, a)
assert x._has_chainerx_array is True
with pytest.raises(RuntimeError):
x.node
def check_attributes(self, xp):
a = get_array(xp, self.x)
x = chainer.Variable(a)
xp.testing.assert_array_equal(x.array, a)
assert x.array is x.data
assert x.shape == self.x.shape
assert x.ndim == self.x.ndim
assert x.size == self.x.size
assert x.dtype == self.x.dtype
assert x.requires_grad
assert x._has_chainerx_array is (a is not None and xp is chainerx)
@attr.chainerx
def test_attributes_chainerx(self):
self.check_attributes(chainerx)
def test_attributes_cpu(self):
self.check_attributes(np)
@attr.gpu
def test_attributes_gpu(self):
self.check_attributes(cuda.cupy)
def test_uninitialized(self):
a = chainer.Variable(None)
assert a.xp is np
assert a._has_chainerx_array is False
def check_len(self, a):
x = chainer.Variable(a)
if x.ndim == 0:
pytest.raises(TypeError, x.__len__)
else:
assert len(x) == self.x_shape[0]
def test_len_cpu(self):
self.check_len(self.x)
@attr.gpu
def test_len_gpu(self):
self.check_len(cuda.to_gpu(self.x))
@attr.chainerx
def test_len_chainerx(self):
self.check_len(chainerx.array(self.x))
def check_get_item(self, a):
x = chainer.Variable(a)
if self.x_shape:
slices = slice(2, 5)
np.testing.assert_equal(backend.CpuDevice().send(x[slices].data),
backend.CpuDevice().send(self.x[slices]))
slices = slice(2, 5),
np.testing.assert_equal(backend.CpuDevice().send(x[slices].data),
backend.CpuDevice().send(self.x[slices]))
def test_get_item_cpu(self):
self.check_get_item(self.x)
@attr.gpu
def test_get_item_gpu(self):
self.check_get_item(cuda.to_gpu(self.x))
def check_label(self, expected, c):
c = chainer.Variable(c)
assert c.label == expected
def test_label_cpu(self):
self.check_label(self.label, self.c)
@attr.gpu
def test_label_gpu(self):
self.check_label(self.label, cuda.to_gpu(self.c))
def check_backward(self, inputs, intermediates, outputs, retain_grad):
# Test that `Variable.backward` writes gradients to correct Variables
# for a given computational graph (with `inputs`, `outputs`, and other
        # `intermediate` variables). It is assumed that `outputs` do not
        # depend on each other.
intermediate_grads = [h.grad_var for h in intermediates]
output_grads = [y.grad_var for y in outputs]
for y in outputs:
y.backward(retain_grad)
assert all([x.grad_var is not None for x in inputs])
if retain_grad:
# intermediate grads should be computed
assert all([h.grad_var is not None for h in intermediates])
# output grads are also retained
assert all([
y.grad_var is gy_orig
for y, gy_orig in zip(outputs, output_grads)])
else:
# intermediate grads should not be touched
assert all([
h.grad_var is gh_orig
for h, gh_orig in zip(intermediates, intermediate_grads)])
# output grads are used (from Chainer v6)
assert all([y.grad_var is None for y in outputs])
# length is number of edges. So, # of Variables created is length+1
def create_linear_chain(self, length, xp):
v = get_variable(xp, self.x)
ret = [v]
for i in six.moves.range(length):
v = constant((ret[i], ), (self.a, ))
ret.append(v)
v.grad = xp.zeros_like(v.data)
return ret
def test_backward_cpu(self):
ret = self.create_linear_chain(2, np)
self.check_backward((ret[0], ), (ret[1], ), (ret[2], ), False)
def test_backward2_cpu(self):
ret = self.create_linear_chain(3, np)
ret[1].grad = ret[3].grad
self.check_backward((ret[0], ), (ret[1], ret[2]), (ret[3], ), False)
@attr.gpu
def test_backward_gpu(self):
ret = self.create_linear_chain(2, cuda.cupy)
self.check_backward((ret[0], ), (ret[1], ), (ret[2], ), False)
# TODO(kataoka): Variable.backward with ChainerX backend unexpectedly
# behaves like retain_grad=True
@pytest.mark.xfail(strict=True)
@attr.chainerx
def test_backward_chainerx(self):
ret = self.create_linear_chain(2, chainerx)
self.check_backward((ret[0], ), (ret[1], ), (ret[2], ), False)
def check_backward_accumulate(self, xp):
x = get_variable(xp, self.x)
y = x * x
y.grad = xp.zeros_like(y.data)
y.backward()
assert x.grad_var.shape == self.x_shape
def test_backward_accumulate_cpu(self):
self.check_backward_accumulate(np)
@attr.gpu
def test_backward_accumulate_gpu(self):
self.check_backward_accumulate(cuda.cupy)
@attr.chainerx
def test_backward_accumulate_chainerx(self):
self.check_backward_accumulate(chainerx)
def test_backward_cpu_retain_grad(self):
ret = self.create_linear_chain(2, np)
self.check_backward((ret[0], ), (ret[1], ), (ret[2], ), True)
def test_backward2_cpu_retain_grad(self):
ret = self.create_linear_chain(3, np)
ret[1].grad = ret[3].grad
self.check_backward((ret[0], ), (ret[1], ret[2]), (ret[3], ), True)
@attr.gpu
def test_backward_gpu_retain_grad(self):
ret = self.create_linear_chain(2, cuda.cupy)
self.check_backward((ret[0], ), (ret[1], ), (ret[2], ), True)
def check_double_backprop(self, xp):
x = get_variable(xp, self.x)
x.grad_var = None
y = x * x * x
y.grad = xp.ones_like(y.data)
y.backward(enable_double_backprop=True)
gx = x.grad_var
x.grad_var = None # clear grad
gx.grad = xp.ones_like(x.data)
gx.backward()
expect = 6 * x
testing.assert_allclose(x.grad_var.data, expect.data)
def test_double_backprop_cpu(self):
self.check_double_backprop(np)
@attr.gpu
def test_double_backprop_gpu(self):
self.check_double_backprop(cuda.cupy)
@attr.chainerx
def test_double_backprop_chainerx(self):
self.check_double_backprop(chainerx)
def test_backward_no_grad_required(self):
class DummyId(chainer.functions.math.identity.Identity):
def backward(self, a, b):
raise Exception('backward should not be called on inputs that '
'do not require grads')
x = chainer.Variable(self.x)
y1, y2 = DummyId().apply((x, x))
x.node._requires_grad = False
y1.backward()
def test_unchain(self):
ret = self.create_linear_chain(3, np)
old_rank = ret[1].rank
ret[1].unchain()
assert ret[1].creator is None
assert ret[1].rank == old_rank
self.check_backward((ret[1],), (ret[2],), (ret[3],), False)
def test_unchain_split(self):
if self.x.ndim == 0:
return
ret = get_variable(np, self.x)
ret.grad = np.zeros_like(ret.data)
y1, y2 = F.split_axis(ret, [5], axis=0)
y1.unchain()
z1, z2 = F.sum(y1), F.sum(y2)
w = z1 + z2
for var in [y1, y2, z1, z2, w]:
var.grad = np.zeros_like(var.data)
self.check_backward((ret, y1), (y2, z1, z2), (w,), False)
def check_set_none_to_creator(self, use_creator_node):
ret = self.create_linear_chain(3, np)
old_rank = ret[1].rank
if use_creator_node:
ret[1].creator_node = None
else:
ret[1].creator = None
assert ret[1].creator is None
assert ret[1].creator_node is None
assert ret[1].rank == old_rank
self.check_backward((ret[1],), (ret[2],), (ret[3],), False)
def test_set_none_to_creator(self):
self.check_set_none_to_creator(False)
def test_set_none_to_creator_node(self):
self.check_set_none_to_creator(True)
def test_set_none_and_original_to_creator(self):
ret = self.create_linear_chain(2, np)
old_rank = ret[1].rank
creator_node = ret[1].creator_node
ret[1].creator = None
assert ret[1].creator is None
assert ret[1].rank == old_rank
ret[1].node._rank = -1
ret[1].creator_node = creator_node
assert ret[1].creator_node is creator_node
assert ret[1].rank == creator_node.rank + 1
self.check_backward((ret[0],), (ret[1],), (ret[2],), False)
def test_set_fresh_creator(self):
v = chainer.Variable()
f = chainer.Function()
v.creator = f
assert v.creator is f
assert v.creator_node is f.node
assert v.rank == 1
def test_set_fresh_creator_node(self):
v = chainer.Variable()
f = chainer.FunctionNode()
v.creator_node = f
assert v.creator is f
assert v.creator_node is f
assert v.rank == 1
def test_unchain_backward_cpu(self):
ret = self.create_linear_chain(3, np)
ret[1].unchain_backward()
self.check_backward((ret[1], ), (ret[2], ), (ret[3], ), False)
@attr.gpu
def test_unchain_backward_gpu(self):
ret = self.create_linear_chain(3, cuda.cupy)
ret[1].unchain_backward()
self.check_backward((ret[1], ), (ret[2], ), (ret[3], ), False)
def test_unchain_backward_cpu_retain_grad(self):
ret = self.create_linear_chain(3, np)
ret[1].unchain_backward()
self.check_backward((ret[1], ), (ret[2], ), (ret[3], ), False)
@attr.gpu
def test_unchain_backward_gpu_retain_grad(self):
ret = self.create_linear_chain(3, np)
ret[1].unchain_backward()
self.check_backward((ret[1], ), (ret[2], ), (ret[3], ), False)
def test_invalid_value_type(self):
with six.assertRaisesRegex(self, TypeError, 'int'):
chainer.Variable(1)
def test_grad_type_check_pass(self):
a = chainer.Variable(np.empty((3,), dtype=np.float32))
a.grad = np.ndarray((3,), dtype=np.float32)
def test_grad_type_check_pass_type(self):
a = chainer.Variable(np.empty((), dtype=np.float32))
with pytest.raises(TypeError):
a.grad = np.float32()
@attr.gpu
def test_grad_type_check_type_cpu_gpu_mixture(self):
a = chainer.Variable(np.empty((3,), dtype=np.float32))
with pytest.raises(TypeError):
a.grad = cuda.cupy.empty((3,), dtype=np.float32)
def test_grad_type_check_dtype(self):
a = chainer.Variable(np.empty((3,), dtype=np.float32))
with pytest.raises(TypeError):
a.grad = np.empty((3,), dtype=np.float64)
def test_grad_type_check_shape(self):
a = chainer.Variable(np.empty((3,), dtype=np.float32))
with pytest.raises(ValueError):
a.grad = np.empty((2,), dtype=np.float32)
def check_cleargrad(self, a_data, fill=False):
xp = backend.get_array_module(a_data)
a = chainer.Variable(a_data)
if fill:
a.grad = xp.full_like(a_data, np.nan)
a.cleargrad()
assert a.grad is None
def test_cleargrad_cpu(self):
self.check_cleargrad(np.empty(3, dtype=np.float32))
def test_cleargrad_fill_cpu(self):
self.check_cleargrad(np.empty(3, dtype=np.float32), fill=True)
@attr.gpu
def test_cleargrad_gpu(self):
self.check_cleargrad(cuda.cupy.empty(3, dtype=np.float32))
@attr.gpu
def test_cleargrad_fill_gpu(self):
self.check_cleargrad(cuda.cupy.empty(3, dtype=np.float32), fill=True)
@attr.chainerx
def test_cleargrad_chainerx(self):
# TODO(hvy): Simplify to chainerx.empty(int, ...) when supported.
self.check_cleargrad(chainerx.empty((3,), dtype=np.float32))
@attr.chainerx
def test_cleargrad_fill_chainerx(self):
# TODO(hvy): Simplify to chainerx.empty(int, ...) when supported.
self.check_cleargrad(chainerx.empty((3,), dtype=np.float32), fill=True)
def test_addgrad_none_src_dst(self):
x = chainer.Variable(self.x)
y = chainer.Variable(self.x)
y.addgrad(x)
assert y.grad is None
def test_pickle_cpu(self):
x = chainer.Variable(self.x)
x.grad = np.ones_like(x.data)
binary = six.moves.cPickle.dumps(x)
d = six.moves.cPickle.loads(binary)
np.testing.assert_array_equal(x.data, d.data)
np.testing.assert_array_equal(x.grad, d.grad)
@attr.gpu
def test_pickle_gpu(self):
cp = cuda.cupy
x = chainer.Variable(self.x)
x.grad = np.ones_like(x.data)
x.to_gpu()
binary = six.moves.cPickle.dumps(x)
d = six.moves.cPickle.loads(binary)
cp.testing.assert_array_equal(x.data, d.data)
cp.testing.assert_array_equal(x.grad, d.grad)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product({'shape': [(10,), (0,), ()]}))
class TestVariableCopydata(unittest.TestCase):
def test_copydata(self, src_backend_config, dst_backend_config):
shape = self.shape
dtype = np.float32
src_arr_numpy = np.asarray(np.random.randn(*shape), dtype)
dst_arr_numpy = np.asarray(np.random.randn(*shape), dtype)
src_arr = src_backend_config.get_array(src_arr_numpy.copy())
dst_arr = dst_backend_config.get_array(dst_arr_numpy.copy())
src_var = chainer.Variable(src_arr)
dst_var = chainer.Variable(dst_arr)
src_arr_prev = src_var.array
dst_arr_prev = dst_var.array
dst_var.copydata(src_var)
assert src_var.device == src_backend_config.device
assert dst_var.device == dst_backend_config.device
assert dst_var.array is dst_arr_prev
assert src_var.array is src_arr_prev
assert dst_var.dtype == dtype
np.testing.assert_array_equal(
_numpy_device.send(dst_var.array), src_arr_numpy)
np.testing.assert_array_equal(
_numpy_device.send(src_var.array), src_arr_numpy)
def test_copydata_to_uninitialized_parameter(
self, src_backend_config, dst_backend_config):
shape = self.shape
dtype = np.float32
src_arr_numpy = np.asarray(np.random.randn(*shape), dtype)
src_arr = src_backend_config.get_array(src_arr_numpy.copy())
dst_var = chainer.Parameter()
dst_var.to_device(dst_backend_config.device)
src_var = chainer.Parameter(src_arr)
src_arr_prev = src_var.array
dst_var.copydata(src_var)
assert src_var.array is src_arr_prev
assert src_var.device == src_backend_config.device
assert dst_var.device == dst_backend_config.device
np.testing.assert_array_equal(
_numpy_device.send(dst_var.data), src_arr_numpy)
def test_copydata_from_uninitialized_parameter(
self, src_backend_config, dst_backend_config):
shape = self.shape
dtype = np.float32
dst_arr_numpy = np.asarray(np.random.randn(*shape), dtype)
dst_arr = dst_backend_config.get_array(dst_arr_numpy.copy())
initializer = initializers.Zero()
dst_var = chainer.Parameter(dst_arr)
src_var = chainer.Parameter(initializer)
src_var.to_device(src_backend_config.device)
dst_arr_prev = dst_var.array
dst_var.copydata(src_var)
assert src_var.device == src_backend_config.device
assert dst_var.device == dst_backend_config.device
assert dst_var.array is dst_arr_prev
np.testing.assert_array_equal(
_numpy_device.send(dst_var.array),
_numpy_device.send(src_var.array))
def test_copydata_from_to_uninitialized_parameters(
self, src_backend_config, dst_backend_config):
dst_var = chainer.Parameter()
src_var = chainer.Parameter()
src_var.to_device(src_backend_config.device)
dst_var.to_device(dst_backend_config.device)
dst_var.copydata(src_var)
assert src_var.device == src_backend_config.device
assert dst_var.device == dst_backend_config.device
assert src_var.array is None
assert dst_var.array is None
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product(
{
'shape': [(10,), (0,), ()],
'requires_grad': [True, False],
}
))
class TestVariableGrad(unittest.TestCase):
def test_grad(self, backend_config):
x = backend_config.get_array(
np.random.uniform(-1, 1, self.shape).astype(np.float32))
g = backend_config.get_array(
np.random.uniform(0.1, 10, self.shape).astype(np.float32))
v = chainer.Variable(x, requires_grad=self.requires_grad)
expected_error = (
backend_config.xp is chainerx
and not self.requires_grad)
if expected_error:
with pytest.raises(Exception):
v.grad = g
else:
v.grad = g
assert v.grad_var.requires_grad is True
assert v.grad is not None
assert v.requires_grad == self.requires_grad
backend_config.xp.testing.assert_array_equal(v.grad, g)
def check_grad_var(self, backend_config, grad_var_requires_grad):
x = backend_config.get_array(
np.random.uniform(-1, 1, self.shape).astype(np.float32))
g = backend_config.get_array(
np.random.uniform(0.1, 10, self.shape).astype(np.float32))
v = chainer.Variable(x, requires_grad=self.requires_grad)
gv = chainer.Variable(g, requires_grad=grad_var_requires_grad)
expected_error = (
backend_config.xp is chainerx
and not self.requires_grad)
if expected_error:
with pytest.raises(Exception):
v.grad_var = gv
else:
v.grad_var = gv
assert v.requires_grad == self.requires_grad
backend_config.xp.testing.assert_array_equal(v.grad, g)
# Same instance should be returned each time.
assert v.grad_var is gv
def test_grad_var(self, backend_config):
self.check_grad_var(backend_config, True)
self.check_grad_var(backend_config, False)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product(
{
'shape': [(10,), (0,), ()],
'grad_var_requires_grad': [True, False],
'fill': [True, False],
}
))
class TestVariableZerograd(unittest.TestCase):
def test_zerograd(self, backend_config):
shape = self.shape
dtype = np.float32
expect_error = (
backend_config.xp is chainerx
and self.fill
and self.grad_var_requires_grad)
xp = backend_config.xp
a = chainer.Variable(
backend_config.get_array(np.empty(shape, dtype)))
if self.fill:
a.grad_var = chainer.Variable(
backend_config.get_array(np.full(shape, np.nan, dtype)),
requires_grad=self.grad_var_requires_grad)
if xp is not chainerx:
a.grad_var.creator_node = chainer.FunctionNode()
with testing.assert_warns(DeprecationWarning):
if expect_error:
with pytest.raises(Exception):
a.zerograd()
return
a.zerograd()
assert a.grad is not None
if self.fill and xp is not chainerx:
assert a.grad_var.creator_node is None
xp.testing.assert_array_equal(a.grad, xp.zeros_like(a.grad))
class VariableAddgradTestBase(object):
def check_addgrad(
self, should_succeed,
src_backend_config, dst_backend_config, current_backend_config):
src_device = src_backend_config.device
dst_device = dst_backend_config.device
src_np = np.full(3, 10, dtype=np.float32)
dst_np = np.full(3, 20, dtype=np.float32)
if self.clear_src_grad:
expect_np = np.full(3, 20, dtype=np.float32)
elif self.clear_dst_grad:
expect_np = np.full(3, 10, dtype=np.float32)
else:
expect_np = np.full(3, 30, dtype=np.float32)
src = src_device.send(src_np)
dst = dst_device.send(dst_np)
a = chainer.Variable(src)
a.grad = src
b = chainer.Variable(dst)
b.grad = dst
if self.clear_src_grad:
a.cleargrad()
if self.clear_dst_grad:
b.cleargrad()
with current_backend_config:
if should_succeed:
b.addgrad(a)
else:
with pytest.raises(RuntimeError):
b.addgrad(a)
if should_succeed:
np.testing.assert_array_equal(
_numpy_device.send(b.grad), expect_np)
assert backend.get_device_from_array(b.data) == dst_device
assert backend.get_device_from_array(b.grad) == dst_device
addgrad_test_parameterize = testing.parameterize(*testing.product(
{
'clear_src_grad,clear_dst_grad': [
[False, False],
[True, False],
[False, True],
],
}))
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _nonchainerx_backend_params)
@testing.backend.inject_backend_tests(None, _nonchainerx_backend_params)
@addgrad_test_parameterize
class TestVariableAddgradNonChainerx(
VariableAddgradTestBase, unittest.TestCase):
def test_addgrad(
self, src_backend_config, dst_backend_config,
current_backend_config):
self.check_addgrad(
True, src_backend_config, dst_backend_config,
current_backend_config)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _chainerx_backend_params)
@testing.backend.inject_backend_tests(None, _chainerx_backend_params)
@addgrad_test_parameterize
class TestVariableAddgradChainerx(
VariableAddgradTestBase, unittest.TestCase):
def test_addgrad(
self, src_backend_config, dst_backend_config,
current_backend_config):
self.check_addgrad(
True, src_backend_config, dst_backend_config,
current_backend_config)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _nonchainerx_backend_params)
@testing.backend.inject_backend_tests(None, _chainerx_backend_params)
@addgrad_test_parameterize
class TestVariableAddgradBetweenNonChainerxAndChainerx(
VariableAddgradTestBase, unittest.TestCase):
def test_unsupported_addgrad(
self, src_backend_config, dst_backend_config,
current_backend_config):
self.check_addgrad(
False, src_backend_config, dst_backend_config,
current_backend_config)
@testing.parameterize(
{'array_require_grad': False, 'requires_grad': 'default',
'expected': True},
{'array_require_grad': False, 'requires_grad': False, 'expected': False},
{'array_require_grad': False, 'requires_grad': True, 'expected': True},
{'array_require_grad': True, 'requires_grad': 'default',
'expected': True},
{'array_require_grad': True, 'requires_grad': False, 'expected': 'raise'},
{'array_require_grad': True, 'requires_grad': True, 'expected': True},
)
@attr.chainerx
class TestVariableChainerXInitRequiresGrad(unittest.TestCase):
def test_chainerx_init_requires_grad(self):
x = chainerx.ones((2,), dtype=np.float32)
if self.array_require_grad:
x.require_grad()
def v():
if self.requires_grad == 'default':
return chainer.Variable(x)
else:
return chainer.Variable(x, requires_grad=self.requires_grad)
if self.expected == 'raise':
with pytest.raises(ValueError):
v()
else:
assert v().requires_grad is self.expected
@testing.parameterize(
{'x_shape': (10,)},
{'x_shape': ()},
)
class TestVariableToCpu(unittest.TestCase):
def setUp(self):
self.x = np.zeros(self.x_shape, dtype=np.float32)
self.gx = np.ones_like(self.x)
def check_to_cpu(self, x, gx, requires_grad=True):
x_var = chainer.Variable(x, requires_grad=requires_grad)
set_grad_var = requires_grad or not isinstance(x, chainerx.ndarray)
if set_grad_var:
x_var.grad_var = chainer.Variable(gx, requires_grad=requires_grad)
x_var.to_cpu()
assert x_var.xp is np
assert x_var._has_chainerx_array is False
assert x_var.node is not None
assert isinstance(x_var.data, np.ndarray)
assert x.shape == x_var.shape
assert x.dtype == x_var.dtype
np.testing.assert_array_equal(
backend.CpuDevice().send(x_var.data), backend.CpuDevice().send(x))
if set_grad_var:
assert isinstance(x_var.grad, np.ndarray)
assert gx.shape == x_var.grad.shape
assert gx.dtype == x_var.grad.dtype
np.testing.assert_array_equal(
backend.CpuDevice().send(x_var.grad),
backend.CpuDevice().send(gx))
assert x_var.grad_var is not None
assert x_var.grad_var.node is not None
else:
assert x_var.grad is None
assert x_var.grad_var is None
orig_xp = backend.get_array_module(x, gx)
if orig_xp is np:
assert x_var.data is x
assert x_var.grad is gx
else:
assert x_var.data is not x
assert not set_grad_var or x_var.grad is not gx
assert x_var.xp is not chainerx
assert x_var._has_chainerx_array is False
def test_to_cpu_from_cpu(self):
self.check_to_cpu(self.x, self.gx)
@attr.gpu
def test_to_cpu_from_gpu(self):
self.check_to_cpu(cuda.to_gpu(self.x), cuda.to_gpu(self.gx))
@attr.chainerx
def test_to_cpu_from_chainerx(self):
self.check_to_cpu(
chainerx.array(self.x),
chainerx.array(self.gx),
requires_grad=False)
@attr.chainerx
def test_to_cpu_from_chainerx_requiring_grad(self):
with self.assertRaises(RuntimeError):
self.check_to_cpu(
chainerx.array(self.x),
chainerx.array(self.gx),
requires_grad=True)
@testing.parameterize(
{'x_shape': (10,)},
{'x_shape': ()},
)
@attr.gpu
class TestVariableToGpu(unittest.TestCase):
def setUp(self):
self.x = np.zeros(self.x_shape, dtype=np.float32)
self.gx = np.ones_like(self.x)
def check_to_gpu(self, x, gx, device_id=None, requires_grad=True):
x_var = chainer.Variable(x, requires_grad=requires_grad)
set_grad_var = requires_grad or not isinstance(x, chainerx.ndarray)
if set_grad_var:
x_var.grad_var = chainer.Variable(gx, requires_grad=requires_grad)
x_var.to_gpu(device_id)
if device_id is None:
expected_device_id = cuda.cupy.cuda.runtime.getDevice()
else:
expected_device_id = device_id
expected_device = cuda.GpuDevice.from_device_id(expected_device_id)
assert x_var.xp is cuda.cupy
assert x_var._has_chainerx_array is False
assert x_var.node is not None
assert isinstance(x_var.data, cuda.cupy.ndarray)
assert x_var.data.device.id == expected_device_id
assert x_var.device == expected_device
assert x.shape == x_var.shape
assert x.dtype == x_var.dtype
np.testing.assert_array_equal(
backend.CpuDevice().send(x_var.data), backend.CpuDevice().send(x))
if set_grad_var:
assert isinstance(x_var.grad, cuda.cupy.ndarray)
assert x_var.grad.device.id == expected_device_id
assert x_var.grad_var.device == expected_device
assert gx.shape == x_var.grad.shape
assert gx.dtype == x_var.grad.dtype
np.testing.assert_array_equal(
backend.CpuDevice().send(x_var.grad),
backend.CpuDevice().send(gx))
assert x_var.grad_var is not None
assert x_var.grad_var.node is not None
else:
assert x_var.grad is None
assert x_var.grad_var is None
orig_device = backend.get_device_from_array(x)
if orig_device == expected_device:
assert x_var.data is x
assert x_var.grad is gx
else:
assert x_var.data is not x
assert not set_grad_var or x_var.grad is not gx
assert x_var.xp is not chainerx
def test_to_gpu_from_cpu(self):
self.check_to_gpu(self.x, self.gx)
def test_to_gpu_from_gpu(self):
self.check_to_gpu(cuda.to_gpu(self.x), cuda.to_gpu(self.gx))
@attr.multi_gpu(2)
def test_to_gpu_from_another_gpu(self):
self.check_to_gpu(cuda.to_gpu(self.x), cuda.to_gpu(self.gx), 1)
@attr.chainerx
def test_to_gpu_from_chainerx(self):
self.check_to_gpu(
chainerx.array(self.x),
chainerx.array(self.gx),
requires_grad=False)
@attr.chainerx
def test_to_gpu_from_chainerx_requiring_grad(self):
with self.assertRaises(RuntimeError):
self.check_to_gpu(
chainerx.array(self.x),
chainerx.array(self.gx),
requires_grad=True)
@testing.parameterize(
{'x_shape': (10,)},
{'x_shape': ()},
)
@attr.chainerx
class TestVariableToChainerX(unittest.TestCase):
def setUp(self):
self.x = np.zeros(self.x_shape, dtype=np.float32)
self.gx = np.ones_like(self.x)
def infer_expected_device(self, *arrays):
xp = backend.get_array_module(*arrays)
if xp is np:
return chainerx.get_device('native', 0)
elif xp is cuda.cupy:
return chainerx.get_device('cuda', arrays[0].device.id)
elif xp is chainerx:
return arrays[0].device
assert False
def check_to_chx(self, x, gx, requires_grad=True):
x_var = chainer.Variable(x, requires_grad=requires_grad)
x_var.grad_var = chainer.Variable(gx, requires_grad=requires_grad)
x_var.to_chx()
expected_device = self.infer_expected_device(x, gx)
assert x_var.xp is chainerx
assert x_var._has_chainerx_array is True
with pytest.raises(RuntimeError):
x_var.node
assert isinstance(x_var.array, chainerx.ndarray)
assert x.shape == x_var.shape
assert x.dtype == x_var.dtype
assert x_var.data.device is expected_device
np.testing.assert_array_equal(
backend.CpuDevice().send(x_var.data), backend.CpuDevice().send(x))
if requires_grad:
assert isinstance(x_var.grad, chainerx.ndarray)
assert gx.shape == x_var.grad.shape
assert gx.dtype == x_var.grad.dtype
assert x_var.grad.device is expected_device
np.testing.assert_array_equal(
backend.CpuDevice().send(x_var.grad),
backend.CpuDevice().send(gx))
assert x_var.grad_var is not None
with pytest.raises(RuntimeError):
x_var.grad_var.node
else:
assert x_var.grad is None
assert x_var.grad_var is None
assert x_var.xp is chainerx
assert x_var._has_chainerx_array is True
def test_to_chx_from_numpy(self):
self.check_to_chx(self.x, self.gx)
@attr.gpu
def test_to_chx_from_cupy(self):
self.check_to_chx(cuda.to_gpu(self.x), cuda.to_gpu(self.gx))
# TODO(hvy): Write test when implemented.
@attr.ideep
def test_ideep_to_chx(self):
raise unittest.SkipTest('Not yet supported')
def test_to_chx_from_chx(self):
self.check_to_chx(
chainerx.array(self.x), chainerx.array(self.gx))
def test_to_chx_from_another_device(self):
self.check_to_chx(
chainerx.array(self.x), chainerx.array(self.gx))
def test_to_chx_not_requiring_grad(self):
self.check_to_chx(self.x, self.gx, requires_grad=False)
def test_to_chx_with_creator(self):
x = chainer.Variable(self.x)
y = x * x
with self.assertRaises(RuntimeError):
y.to_chx()
@testing.parameterize(
{'x_shape': (10,)},
{'x_shape': ()},
)
@chainer.testing.backend.inject_backend_tests(
['test_from_chx'],
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@attr.chainerx
class TestVariableFromChainerX(unittest.TestCase):
def setUp(self):
self.x = chainerx.zeros(self.x_shape, dtype=np.float32)
def infer_expected_xp_and_device(self, x):
xp = backend.get_array_module(x)
if xp is np:
return xp, None
elif xp is cuda.cupy:
return xp, x.device
elif xp is chainerx:
backend_name = x.device.backend.name
if backend_name == 'native':
return np, None
elif backend_name == 'cuda':
return cuda.cupy, cuda.cupy.cuda.Device(x.device.index)
assert False
def test_from_chx(self, backend_config):
x = backend_config.get_array(self.x)
x_var = chainer.Variable(x, requires_grad=False)
x_var.from_chx()
expected_xp, expected_device = self.infer_expected_xp_and_device(x)
assert x_var.xp is expected_xp
assert x_var._has_chainerx_array is (expected_xp is chainerx)
assert x_var.node is not None
assert isinstance(x_var.array, expected_xp.ndarray)
assert expected_device is None or x_var.array.device == expected_device
assert x.shape == x_var.shape
assert x.dtype == x_var.dtype
assert x_var.grad is None
assert x_var.grad_var is None
np.testing.assert_array_equal(
backend.CpuDevice().send(x_var.array), backend.CpuDevice().send(x))
def test_invalid_from_chx_requires_grad(self):
x = chainer.Variable(self.x, requires_grad=True)
with self.assertRaises(RuntimeError):
x.from_chx()
@testing.parameterize(
{'x_shape': (10,)},
{'x_shape': ()},
)
@attr.chainerx
class TestVariableToDevice(unittest.TestCase):
def setUp(self):
self.x = np.zeros(self.x_shape, dtype=np.float32)
self.gx = np.ones_like(self.x)
def check_to_device(self, x, gx, device_spec, expected_xp):
x_var = chainer.Variable(x)
x_var.grad_var = chainer.Variable(gx)
x_var.to_device(device_spec)
assert x_var.xp is expected_xp
assert x_var._has_chainerx_array is (expected_xp is chainerx)
assert x_var.grad_var.xp is expected_xp
assert x_var.grad_var._has_chainerx_array is (expected_xp is chainerx)
def test_to_device_numpy(self):
self.check_to_device(self.x, self.gx, '@numpy', np)
@attr.gpu
def test_to_device_cupy(self):
self.check_to_device(self.x, self.gx, '@cupy:0', cuda.cupy)
@attr.chainerx
def test_to_device_chainerx(self):
self.check_to_device(self.x, self.gx, 'native:0', chainerx)
@testing.parameterize(*testing.product(
{
'x_shape': [(10,), (), None],
'requires_grad': [True, False],
}))
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestVariableToDeviceTwice(unittest.TestCase):
def setUp(self):
if self.x_shape is None:
self.x = None
else:
self.x = np.zeros(self.x_shape, dtype=np.float32)
def test_to_device_twice(self, backend_config1, backend_config2):
device1 = backend_config1.device
device2 = backend_config2.device
var = chainer.Variable(self.x, requires_grad=self.requires_grad)
# Transfer to device 1
var.to_device(device1)
# Transfer to device 2
should_fail = (
self.requires_grad
and self.x is not None
and device1.xp is chainerx
and device2.xp is not chainerx)
if should_fail:
# Non-ChainerX device to ChainerX device should fail if
# requires_grad
with pytest.raises(RuntimeError):
var.to_device(device2)
else:
# Should succeed
var.to_device(device2)
assert var.requires_grad == self.requires_grad
if self.x is None:
assert var.array is None
assert var.data is None
else:
assert isinstance(var.array, device2.xp.ndarray)
assert backend.get_device_from_array(var.array) == device2
np.testing.assert_array_equal(
self.x,
backend.CpuDevice().send(var.array))
class TestVariableBasic(unittest.TestCase):
def test_unhashable(self):
a = chainer.Variable(np.ones((2,)))
with six.assertRaisesRegex(self, TypeError, '^unhashable type: '):
hash(a)
def test_unequatable(self):
a = chainer.Variable(np.ones((2,)))
b = chainer.Variable(np.ones((2,)))
with pytest.raises(TypeError):
a == b
with pytest.raises(TypeError):
a == a
with pytest.raises(TypeError):
a != b
with pytest.raises(TypeError):
a != a
def test_uncomparable(self):
a = chainer.Variable(np.ones((2,)))
b = chainer.Variable(np.ones((2,)))
with pytest.raises(TypeError):
a < b
with pytest.raises(TypeError):
a <= b
with pytest.raises(TypeError):
a > b
with pytest.raises(TypeError):
a >= b
def test_bool_inconvertible(self):
a = chainer.Variable(np.ones((2,)))
with pytest.raises(TypeError):
if a:
pass
with pytest.raises(TypeError):
if not a:
pass
class TestVariableDataAssign(unittest.TestCase):
def test_variable_data_assign(self):
x = chainer.Variable(np.ones((3, 2), np.float32))
chainer.functions.sin(x)
x.data = np.ones((2, 4), np.float64)
assert x.data.shape == (2, 4)
assert x.data.dtype == np.float64
assert x.shape == (2, 4)
assert x.dtype == np.float64
assert x.node.shape == (2, 4)
assert x.node.dtype == np.float64
assert x.node.data.shape == (2, 4)
assert x.node.data.dtype == np.float64
@attr.gpu
def test_to_gpu(self):
x = chainer.Variable(np.ones((3, 2), np.float32))
chainer.functions.sin(x)
x.to_gpu()
assert x.data is x.node.data
x.to_cpu()
assert x.data is x.node.data
@attr.ideep
def test_to_intel64(self):
x = chainer.Variable(np.ones((3, 2), np.float32))
chainer.functions.sin(x)
x.to_intel64()
assert x.data is x.node.data
x.to_cpu()
assert x.data is x.node.data
class TestParameter(unittest.TestCase):
def setUp(self):
self.a = np.random.rand(3, 2).astype(np.float32)
def test_initializer(self):
x = chainer.Parameter(shape=(1,))
assert x.initializer is not None
def test_initialize_by_scalar(self):
x = chainer.Parameter(2., (3,))
np.testing.assert_array_equal(x.data, np.array([2., 2., 2.]))
def test_initialize_by_initializer(self):
x = chainer.Parameter(initializers.One(), (3,))
np.testing.assert_array_equal(
x.data, np.array([1., 1., 1.], dtype='f'))
def test_initialize_by_none(self):
x = chainer.Parameter(None, (3,))
np.testing.assert_array_equal(
x.data, np.full((3,), np.nan, dtype='f'))
def test_initialize_by_array(self):
data = np.array([1., 2., 3.], dtype='f')
x = chainer.Parameter(data)
assert x.data is data
@attr.gpu
def test_initialize_by_cupy_array(self):
data = cuda.cupy.array([1., 2., 3.], dtype='f')
x = chainer.Parameter(data, (3,))
assert isinstance(x.data, cuda.cupy.ndarray)
cuda.cupy.testing.assert_array_equal(x.data, data)
@attr.chainerx
def test_initialize_by_chainerx_array(self):
data = chainerx.array([1., 2., 3.], dtype='f')
x = chainer.Parameter(data)
assert isinstance(x.data, chainerx.ndarray)
chainerx.testing.assert_array_equal(x.data, data)
def test_update_rule(self):
update_rule = mock.MagicMock()
g = self.a.copy()
x = chainer.Parameter(self.a)
x.grad = g
x.update_rule = update_rule
x.update()
assert update_rule.update.call_count == 1
assert update_rule.update.call_args_list[0] == [(x,), {}]
def test_update_rule_without_grad(self):
update_rule = mock.MagicMock()
x = chainer.Parameter(self.a)
x.update_rule = update_rule
x.update()
assert update_rule.update.call_count == 1
@testing.inject_backend_tests(
None,
[
{},
{'use_ideep': 'always'},
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@testing.parameterize(
{'x_shape': (10,)},
{'x_shape': ()},
)
class TestParameterToDevice(unittest.TestCase):
def check_to_device(self, x, device):
expected_xp = device.xp
assert isinstance(x, chainer.Parameter)
x.to_device(device)
assert x.xp is expected_xp
assert x._has_chainerx_array is (expected_xp is chainerx)
def test_initializer_to_device(self, backend_config):
x = chainer.Parameter(shape=self.x_shape)
self.check_to_device(x, backend_config.device)
def test_initialize_by_scalar_to_device(self, backend_config):
x = chainer.Parameter(2., self.x_shape)
self.check_to_device(x, backend_config.device)
def test_initialize_by_initializer_to_device(self, backend_config):
x = chainer.Parameter(initializers.One(), self.x_shape)
self.check_to_device(x, backend_config.device)
def test_initialize_by_none_to_device(self, backend_config):
x = chainer.Parameter(None, self.x_shape)
self.check_to_device(x, backend_config.device)
def test_initialize_by_array_to_device(self, backend_config):
data = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)
x = chainer.Parameter(data)
self.check_to_device(x, backend_config.device)
def test_internal_grad(self, backend_config):
device = backend_config.device
p = chainer.Parameter(shape=self.x_shape)
p.to_device(device)
if device.xp is chainerx:
assert p._grad is None
else:
assert isinstance(p._grad, device.supported_array_types)
@testing.parameterize(
{'x_shape': (10,)},
{'x_shape': ()},
)
@attr.chainerx
class TestParameterToChainerX(unittest.TestCase):
def check_to_chx(self, x):
assert isinstance(x, chainer.Parameter)
x.to_chx()
assert x.xp is chainerx
assert x._has_chainerx_array is True
def check_initializer(self, shape):
x = chainer.Parameter(shape=shape)
self.check_to_chx(x)
def check_initialize_by_scalar(self, shape):
x = chainer.Parameter(2., shape)
self.check_to_chx(x)
def check_initialize_by_initializer(self, shape):
x = chainer.Parameter(initializers.One(), shape)
self.check_to_chx(x)
def check_initialize_by_none(self, shape):
x = chainer.Parameter(None, shape)
self.check_to_chx(x)
def check_initialize_by_array(self, shape, xp, device=None):
if device is not None:
data = xp.random.uniform(-1, 1, shape, device=device).astype('f')
else:
data = xp.random.uniform(-1, 1, shape).astype('f')
x = chainer.Parameter(data)
self.check_to_chx(x)
def test_initializer_to_chx(self):
self.check_initializer(self.x_shape)
def test_initialize_by_scalar_to_chx(self):
self.check_initialize_by_scalar(self.x_shape)
def test_initialize_by_initializer_to_chx(self):
self.check_initialize_by_initializer(self.x_shape)
def test_initialize_by_none_to_chx(self):
self.check_initialize_by_none(self.x_shape)
def test_initialize_by_array_to_chx_numpy(self):
self.check_initialize_by_array(self.x_shape, np)
@attr.gpu
def test_initialize_by_array_to_chx_cupy(self):
self.check_initialize_by_array(self.x_shape, cuda.cupy)
@attr.chainerx
def test_initialize_by_array_to_chx_chainerx_native(self):
self.check_initialize_by_array(self.x_shape, chainerx, 'native:0')
@attr.gpu
@attr.chainerx
def test_initialize_by_array_to_chx_chainerx_cuda(self):
self.check_initialize_by_array(self.x_shape, chainerx, 'cuda:0')
@testing.parameterize(
{'x_shape': (10,)},
{'x_shape': ()},
)
@attr.chainerx
class TestParameterFromChainerX(unittest.TestCase):
def check_from_chx(self, x, expected_xp):
assert isinstance(x, chainer.Parameter)
x.from_chx()
assert x.xp is expected_xp
assert x._has_chainerx_array is (expected_xp is chainerx)
def check_initializer(self, shape, expected_xp):
x = chainer.Parameter(shape=shape)
self.check_from_chx(x, expected_xp)
def check_initialize_by_scalar(self, shape, expected_xp):
x = chainer.Parameter(2., shape)
self.check_from_chx(x, expected_xp)
def check_initialize_by_initializer(self, shape, expected_xp):
x = chainer.Parameter(initializers.One(), shape)
self.check_from_chx(x, expected_xp)
def check_initialize_by_none(self, shape, expected_xp):
x = chainer.Parameter(None, shape)
self.check_from_chx(x, expected_xp)
def check_initialize_by_array(self, shape, xp, expected_xp, device=None):
if device is not None:
data = xp.random.uniform(-1, 1, shape, device=device).astype('f')
else:
data = xp.random.uniform(-1, 1, shape).astype('f')
x = chainer.Parameter(data)
self.check_from_chx(x, expected_xp)
def test_initializer_from_chx(self):
self.check_initializer(self.x_shape, np)
def test_initialize_by_scalar_from_chx(self):
self.check_initialize_by_scalar(self.x_shape, np)
def test_initialize_by_initializer_from_chx(self):
self.check_initialize_by_initializer(self.x_shape, np)
def test_initialize_by_none_from_chx(self):
self.check_initialize_by_none(self.x_shape, np)
def test_initialize_by_array_from_chx_numpy(self):
self.check_initialize_by_array(self.x_shape, np, np)
@attr.gpu
def test_initialize_by_array_from_chx_cupy(self):
self.check_initialize_by_array(self.x_shape, cuda.cupy, cuda.cupy)
@attr.chainerx
def test_initialize_by_array_from_chx_chainerx_native(self):
self.check_initialize_by_array(self.x_shape, chainerx, np, 'native:0')
@attr.gpu
@attr.chainerx
def test_initialize_by_array_from_chx_chainerx_cuda(self):
self.check_initialize_by_array(
self.x_shape, chainerx, cuda.cupy, 'cuda:0')
@testing.inject_backend_tests(
None,
[
{},
{'use_ideep': 'always'},
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
])
class TestParameterToXpu(unittest.TestCase):
def _to_xpu(self, var, device):
if isinstance(device, backend.CpuDevice):
var.to_cpu()
elif isinstance(device, backend.GpuDevice):
var.to_gpu(device.device.id)
elif isinstance(device, backend.Intel64Device):
var.to_intel64()
else:
assert False
def test_internal_grad(self, backend_config):
device = backend_config.device
p = chainer.Parameter(shape=(2, 3))
self._to_xpu(p, device)
assert isinstance(p._grad, device.supported_array_types)
class TestUninitializedParameter(unittest.TestCase):
def setUp(self):
self.a = np.random.rand(3, 2).astype(np.float32)
self.b = np.random.rand(*self.a.shape).astype(self.a.dtype)
def test_init_without_data(self):
x = chainer.Parameter()
assert x.data is None
with pytest.raises(RuntimeError):
x.grad
def test_initialize(self):
x = chainer.Parameter()
x.initialize((3, 2))
assert x.shape == (3, 2)
assert x.dtype == np.float32
np.testing.assert_array_equal(x.data, np.float32('nan'))
np.testing.assert_array_equal(x.grad, np.float32('nan'))
assert backend.get_device_from_array(x.data).xp is np
assert backend.get_device_from_array(x.grad).xp is np
def check_constant_initialization(self, x, a, xp, expected_device):
x.initialize(a.shape)
assert isinstance(x.data, xp.ndarray)
assert x._has_chainerx_array is (xp is chainerx)
xp.testing.assert_array_equal(x.data, xp.asarray(a))
xp.testing.assert_array_equal(x.grad, np.float32('nan'))
assert backend.get_device_from_array(x.data) == expected_device
assert backend.get_device_from_array(x.grad) == expected_device
def test_initialize_with_initializer(self):
x = chainer.Parameter(initializers.Constant(self.a))
self.check_constant_initialization(
x, self.a, np, backend.CpuDevice())
def test_initialize_dtype(self):
initializer = initializers.Zero(np.float64)
x = chainer.Parameter(initializer=initializer)
x.initialize((2, 3))
assert x.data.dtype == np.float64
assert x.grad.dtype == np.float64
def test_initialize_by_callable_default_dtype(self):
def initializer(array):
array.fill(1.0)
x = chainer.Parameter(initializer=initializer)
with chainer.using_config('dtype', np.float16):
x.initialize((3, 2))
assert x.data.dtype == np.float16
assert x.grad.dtype == np.float16
def test_initialize_node(self):
initializer = initializers.Zero(np.float64)
x = chainer.Parameter(initializer=initializer)
x.initialize((2, 3))
assert x.node.shape == (2, 3)
assert x.node.dtype == np.float64
def test_copy_to_initialize(self):
        # This test covers the use case of the link.copy() method.
x = chainer.Parameter()
y = copy.copy(x)
x.initialize((3, 2))
assert x.data is y.data
def test_cleargrad(self):
x = chainer.Parameter()
x.cleargrad()
x.initialize((3, 2))
assert x.grad is None
def test_zerograd_dtype(self):
x = chainer.Parameter(initializers.Zero(dtype=np.float16))
with testing.assert_warns(DeprecationWarning):
x.zerograd()
x.initialize((3, 2))
assert x.grad.dtype == x.data.dtype
def test_dtype_given_by_initializer(self):
class MyInitializer(object):
dtype = 'float16'
def __call__(self, array):
assert False # never called
param = chainer.Parameter(MyInitializer())
assert param.dtype == np.float16
def test_dtype_not_given(self):
class MyInitializer(object):
def __call__(self, array):
assert False # never called
param = chainer.Parameter(MyInitializer())
with pytest.raises(RuntimeError):
param.dtype
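# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): how deferred
# Parameter initialization behaves.  A Parameter created without data stays
# uninitialized until ``initialize()`` is called, and a shallow copy taken
# before initialization shares the array afterwards -- the behaviour that
# ``test_copy_to_initialize`` above relies on for ``link.copy()``.  The
# helper name is hypothetical; it assumes the module-level imports of this
# file (``copy``, ``np``, ``chainer``).
def _sketch_deferred_parameter_initialization():
    p = chainer.Parameter()      # uninitialized: no shape, no data yet
    assert p.data is None
    q = copy.copy(p)             # shallow copy before initialization
    p.initialize((3, 2))         # allocate the array lazily
    assert p.shape == (3, 2)
    assert p.data is q.data      # the copy now shares the same array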
@testing.backend.inject_backend_tests(None, _backend_params)
class TestUninitializedParameterWithDevices(unittest.TestCase):
def setUp(self):
self.a = np.random.rand(3, 2).astype(np.float32)
self.b = np.random.rand(*self.a.shape).astype(self.a.dtype)
def check_constant_initialization(
self, x, a, xp, expected_dtype, expected_device):
x.initialize(a.shape)
assert isinstance(x.data, xp.ndarray)
assert x._has_chainerx_array is (xp is chainerx)
assert x.dtype == expected_dtype
assert x.grad.dtype == expected_dtype
xp.testing.assert_array_equal(x.data, xp.asarray(a))
xp.testing.assert_array_equal(x.grad, np.float32('nan'))
assert backend.get_device_from_array(x.data) == expected_device
assert backend.get_device_from_array(x.grad) == expected_device
def test_initialize_to_device(self, backend_config):
x = chainer.Parameter(initializer=initializers.Constant(self.a))
x.to_device(backend_config.device)
self.check_constant_initialization(
x, self.a, backend_config.xp, self.a.dtype, backend_config.device)
def test_initialize_to_device_with_dtype(self, backend_config):
x = chainer.Parameter(initializer=initializers.Constant(
self.a, dtype=np.float64))
x.to_device(backend_config.device)
with chainer.using_config('dtype', np.float16):
self.check_constant_initialization(
x, self.a, backend_config.xp, np.float64,
backend_config.device)
def test_initialize_with_device(self, backend_config):
a = backend_config.get_array(self.a)
x = chainer.Parameter(initializer=initializers.Constant(a))
# Parameters arrays are always initialized in numpy side
self.check_constant_initialization(
x, self.a, np, self.a.dtype, _numpy_device)
def check_zerograd(self, x, xp):
assert isinstance(x.grad, xp.ndarray)
assert x.grad.shape == x.data.shape
assert x.grad.dtype == x.data.dtype
xp.testing.assert_array_equal(x.grad, 0)
def test_zerograd_to_device(self, backend_config):
x = chainer.Parameter()
with testing.assert_warns(DeprecationWarning):
x.zerograd()
x.to_device(backend_config.device)
x.initialize((3, 2))
self.check_zerograd(x, backend_config.xp)
def test_to_device_zerograd(self, backend_config):
x = chainer.Parameter()
x.to_device(backend_config.device)
with testing.assert_warns(DeprecationWarning):
x.zerograd()
x.initialize((3, 2))
self.check_zerograd(x, backend_config.xp)
class TestAddgradToUninitializedParameter(unittest.TestCase):
def setUp(self):
self.a = np.random.rand(3, 2).astype(np.float32)
self.b = np.random.rand(*self.a.shape).astype(self.a.dtype)
def test_addgrad_to_uninitialized_parameter(self):
x = chainer.Parameter()
y = chainer.Parameter(self.a)
y.grad = self.b
x.cleargrad()
x.addgrad(y)
assert isinstance(x.data, np.ndarray)
assert isinstance(x.grad, np.ndarray)
np.testing.assert_array_equal(x.grad, self.b)
@attr.gpu
def test_addgrad_to_uninitialized_parameter_cpu_to_gpu(self):
x = chainer.Parameter()
y = chainer.Parameter(self.a)
y.grad = self.b
x.to_gpu()
x.cleargrad()
x.addgrad(y)
cp = cuda.cupy
assert isinstance(x.data, cp.ndarray)
assert isinstance(x.grad, cp.ndarray)
cp.testing.assert_array_equal(x.grad, self.b)
@attr.gpu
def test_addgrad_to_uninitialized_parameter_gpu_to_cpu(self):
x = chainer.Parameter()
y = chainer.Parameter(self.a)
y.grad = self.b
y.to_gpu()
x.cleargrad()
x.addgrad(y)
assert isinstance(x.data, np.ndarray)
assert isinstance(x.grad, np.ndarray)
np.testing.assert_array_equal(x.grad, self.b)
@attr.gpu
def test_addgrad_to_uninitialized_parameter_gpu_to_gpu(self):
x = chainer.Parameter()
y = chainer.Parameter(self.a)
y.grad = self.b
x.to_gpu()
y.to_gpu()
x.cleargrad()
x.addgrad(y)
cp = cuda.cupy
assert isinstance(x.data, cp.ndarray)
assert isinstance(x.grad, cp.ndarray)
cp.testing.assert_array_equal(x.grad, self.b)
@attr.multi_gpu(2)
def test_addgrad_to_uninitialized_parameter_gpu_to_another_gpu(self):
x = chainer.Parameter()
y = chainer.Parameter(self.a)
y.grad = self.b
x.to_gpu(1)
y.to_gpu(0)
x.cleargrad()
x.addgrad(y)
cp = cuda.cupy
assert isinstance(x.data, cp.ndarray)
assert isinstance(x.grad, cp.ndarray)
assert int(x.data.device) == 1
assert int(x.grad.device) == 1
cp.testing.assert_array_equal(x.grad, self.b)
@attr.chainerx
def test_addgrad_to_uninitialized_parameter_cpu_to_chx(self):
# TODO(sonots): Support addgrad with ChainerX
raise unittest.SkipTest('ChainerX does not support addgrad')
class TestDebugPrint(unittest.TestCase):
def setUp(self):
self.arr = np.random.randn(5, 3, 5, 5).astype(np.float32)
def check_debug_print(self, v, mean, std):
result = v.debug_print()
assert v.summary() in result
assert 'dtype: float32' in result
# py2.7 on win64 returns shape as long
assert re.match(r'- shape: \(5L?, 3L?, 5L?, 5L?\)',
result.splitlines()[3])
# no grad
msg = 'statistics: mean={mean:.8f}, std={std:.8f}'
msg = msg.format(mean=mean, std=std)
assert msg in result
assert 'grad: None' in result
# zero grad
with testing.assert_warns(DeprecationWarning):
v.zerograd()
result = v.debug_print()
assert 'grad: 0' in result
# add grad
v.grad = v.data
result = v.debug_print()
msg = 'grad: mean={mean:.8f}, std={std:.8f}'.format(mean=mean, std=std)
assert msg in result
def check_debug_print_empty(self, v):
result = v.debug_print()
assert 'device: None' in result
assert 'backend: None' in result
assert 'shape: None' in result
assert 'dtype: None' in result
assert 'statistics: None' in result
assert 'grad: None' in result
def test_debug_print_cpu(self):
v = chainer.Variable(self.arr)
result = v.debug_print()
assert 'device: CPU' in result
assert 'numpy.ndarray' in result
self.check_debug_print(v, mean=float(np.mean(v.data)),
std=float(np.std(v.data)))
@attr.gpu
def test_debug_print_gpu(self):
v = chainer.Variable(self.arr)
v.to_gpu(0)
result = v.debug_print()
assert 'device: <CUDA Device 0>' in result
assert 'cupy' in result
assert 'ndarray' in result
self.check_debug_print(v, mean=float(cuda.cupy.mean(v.data)),
std=float(cuda.cupy.std(v.data)))
def test_debug_print_empty(self):
v = chainer.Variable()
self.check_debug_print_empty(v)
class TestVariableSetCreator(unittest.TestCase):
class MockFunction(chainer.Function):
pass
def setUp(self):
self.x = np.random.uniform(-1, 1, (2, 5)).astype(np.float32)
self.f = self.MockFunction()
self.node = self.f.node
self.node.rank = 10
def check_set_creator(self, x):
x = chainer.Variable(x)
x.set_creator(self.f)
assert x.creator == self.f
assert x.rank == 11
def test_set_creator_cpu(self):
self.check_set_creator(self.x)
@attr.gpu
def test_set_creator_gpu(self):
self.check_set_creator(cuda.to_gpu(self.x))
def check_set_creator_node(self, x):
x = chainer.Variable(x)
x.set_creator_node(self.node)
assert x.creator_node == self.node
assert x.rank == 11
def test_set_creator_node_cpu(self):
self.check_set_creator_node(self.x)
@attr.gpu
def test_set_creator_node_gpu(self):
self.check_set_creator_node(cuda.to_gpu(self.x))
class TestVariableBackwardError(unittest.TestCase):
def setUp(self):
self.x = np.array([1], np.float32)
def check_type_mismatch(self, x_data, retain):
xp = backend.get_array_module(x_data)
class DummyFunction(chainer.Function):
label = 'dummy_function'
def forward(self, inputs):
if not retain:
self.retain_inputs(())
return xp.array(1, np.float32),
def backward(self, inputs, grads):
return [1]
x = chainer.Variable(x_data)
y = DummyFunction()(x)
with six.assertRaisesRegex(self, TypeError, 'dummy_function'):
y.backward()
def test_type_mismatch_cpu(self):
self.check_type_mismatch(self.x, True)
def test_type_mismatch_unretain_cpu(self):
self.check_type_mismatch(self.x, False)
@attr.gpu
def test_type_mismatch_gpu(self):
self.check_type_mismatch(cuda.to_gpu(self.x), True)
@attr.gpu
def test_type_mismatch_unretain_gpu(self):
self.check_type_mismatch(cuda.to_gpu(self.x), False)
def check_dtype_mismatch(self, x_data, retain):
xp = backend.get_array_module(x_data)
class DummyFunction(chainer.Function):
label = 'dummy_function'
def forward(self, inputs):
if not retain:
self.retain_inputs(())
return xp.array(1, np.float32),
def backward(self, inputs, grads):
return xp.array([1], np.int32),
x = chainer.Variable(x_data)
y = DummyFunction()(x)
with six.assertRaisesRegex(self, TypeError, 'dummy_function'):
y.backward()
def test_dtype_mismatch_cpu(self):
self.check_dtype_mismatch(self.x, True)
def test_dtype_mismatch_unretain_cpu(self):
self.check_dtype_mismatch(self.x, False)
@attr.gpu
def test_dtype_mismatch_gpu(self):
self.check_dtype_mismatch(cuda.to_gpu(self.x), True)
@attr.gpu
def test_dtype_mismatch_unretain_gpu(self):
self.check_dtype_mismatch(cuda.to_gpu(self.x), False)
def check_shape_mismatch(self, x_data, retain):
xp = backend.get_array_module(x_data)
class DummyFunction(chainer.Function):
label = 'dummy_function'
def forward(self, inputs):
if not retain:
self.retain_inputs(())
return xp.array(1, np.float32),
def backward(self, inputs, grads):
return xp.array([1, 2], np.float32),
x = chainer.Variable(x_data)
y = DummyFunction()(x)
with six.assertRaisesRegex(self, ValueError, 'dummy_function'):
y.backward()
def test_shape_mismatch_cpu(self):
self.check_shape_mismatch(self.x, True)
def test_shape_mismatch_unretain_cpu(self):
self.check_shape_mismatch(self.x, False)
@attr.gpu
def test_shape_mismatch_gpu(self):
self.check_shape_mismatch(cuda.to_gpu(self.x), True)
@attr.gpu
def test_shape_mismatch_unretain_gpu(self):
self.check_shape_mismatch(cuda.to_gpu(self.x), False)
class TestVariableBackwardErrorTraceback(unittest.TestCase):
def setUp(self):
self.x = np.array([1], np.float32)
chainer.set_debug(True)
def tearDown(self):
chainer.set_debug(False)
def check_traceback(self, x_data):
xp = backend.get_array_module(x_data)
class DummyFunction(chainer.Function):
label = 'dummy_function'
def forward(self, inputs):
return xp.array(1, np.float32),
def backward(self, inputs, grads):
return xp.array([1, 2], np.float32),
x = chainer.Variable(x_data)
line = inspect.currentframe().f_lineno + 1
y = DummyFunction()(x) # `line` is THIS line
try:
y.backward()
self.fail()
except ValueError as e:
assert 'Stacktrace' in str(e)
assert 'line %d' % line in str(e)
def test_traceback_cpu(self):
self.check_traceback(self.x)
@attr.gpu
def test_traceback_gpu(self):
self.check_traceback(cuda.to_gpu(self.x))
def test_traceback_numpy_error(self):
x = chainer.Variable(np.array(0.))
line = inspect.currentframe().f_lineno + 1
y = chainer.functions.sqrt(x) # `line` is THIS line
with six.assertRaisesRegex(self, FloatingPointError, 'line %d' % line):
with np.errstate(divide='raise'):
y.backward()
def test_raise(self):
x = np.array([1], np.float32)
x = chainer.Variable(x)
y = F.identity(x)
y.grad = np.array([np.nan], np.float32)
with pytest.raises(RuntimeError):
y.backward()
def test_int(self):
x = np.array([1], np.int)
x = chainer.Variable(x)
y = F.identity(x)
y.grad = np.array([0], np.int)
y.backward()
@testing.parameterize(*testing.product({
'in_shape': [(4, 3, 2)],
'out_shape': [(2, 2, 6), (2, -1, 6), 24, (-1,), [2, 12]],
'dtype': [np.float16, np.float32, np.float64],
}))
class TestReshape(unittest.TestCase):
def setUp(self):
self.x = np.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
def check_forward(self, x_data):
shape = self.out_shape
x = chainer.Variable(x_data)
y = x.reshape(shape)
assert y.data.dtype == self.dtype
assert (self.x.reshape(shape)
== backend.CpuDevice().send(y.data)).all()
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
@attr.chainerx
def test_forward_chainerx(self):
self.check_forward(chainerx.array(self.x))
def check_backward(self, x_data):
x = chainer.Variable(x_data)
y = x.reshape(self.out_shape)
y.grad = y.data
y.backward()
testing.assert_allclose(backend.CpuDevice().send(x.data),
backend.CpuDevice().send(x.grad),
atol=0, rtol=0)
def test_backward_cpu(self):
self.check_backward(self.x)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x))
@attr.chainerx
def test_backward_chainerx(self):
self.check_backward(chainerx.array(self.x))
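# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the gradient of
# ``Variable.reshape`` is just the upstream gradient reshaped back to the
# input shape, which is what ``check_backward`` above verifies by feeding
# ``y.data`` in as ``y.grad``.  The helper name is hypothetical; it assumes
# the module-level imports of this file (``np``, ``chainer``).
def _sketch_reshape_gradient():
    x = chainer.Variable(np.arange(6, dtype=np.float32).reshape(2, 3))
    y = x.reshape((3, 2))
    y.grad = np.ones((3, 2), dtype=np.float32)
    y.backward()
    assert x.grad.shape == (2, 3)    # gradient comes back in x's shape
    np.testing.assert_array_equal(x.grad, np.ones((2, 3), np.float32))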
@testing.parameterize(*testing.product({
'shape': [(0,), (0, 0), (), (1,), (1, 1), (1, 1, 1), (2,), (2, 3)],
'dtype': [np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64],
}))
class TestItem(unittest.TestCase):
def setUp(self):
self.x = np.full(self.shape, 1, self.dtype)
self.target_type = type(np.array(0, dtype=self.dtype).item())
def check_item(self, x):
var = chainer.Variable(x)
if x.size != 1:
with pytest.raises(ValueError):
var.item()
else:
value = var.item()
assert type(value) is self.target_type
def test_cpu(self):
self.check_item(self.x)
@attr.gpu
def test_gpu(self):
self.check_item(cuda.to_gpu(self.x))
def check_item_chainerx(self, x, requires_grad=True):
# TODO(crcrpar): Remove `requires_grad` argument once chainerx.ndarray
# with integral dtype supports gradient computation.
var = chainer.Variable(x, requires_grad=requires_grad)
if x.size != 1:
with pytest.raises(chainerx.DimensionError):
var.item()
else:
value = var.item()
assert type(value) is self.target_type
@attr.chainerx
def test_chainerx(self):
if self.dtype in (np.int16, np.int32, np.int64):
requires_grad = False
else:
requires_grad = True
self.check_item_chainerx(chainerx.array(self.x), requires_grad)
@testing.parameterize(*testing.product({
'in_shape': [(4, 3, 2)],
'axes': [[], [(-1, 0, 1)], [[-1, 0, 1]], [None], [-1, 0, 1]],
    'dtype': [np.float16, np.float32, np.float64],
}))
class TestTranspose(unittest.TestCase):
def setUp(self):
self.x = np.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
def check_forward(self, x_data):
axes = self.axes
x = chainer.Variable(x_data)
y = x.transpose(*axes)
assert y.data.dtype == self.dtype
assert (self.x.transpose(*axes) ==
backend.CpuDevice().send(y.data)).all()
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
@attr.chainerx
def test_forward_chainerx(self):
self.check_forward(chainerx.array(self.x))
def check_backward(self, x_data):
x = chainer.Variable(x_data)
y = x.transpose(*self.axes)
y.grad = y.data
y.backward()
testing.assert_allclose(x.data, x.grad, atol=0, rtol=0)
def test_backward_cpu(self):
self.check_backward(self.x)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x))
@attr.chainerx
def test_backward_chainerx(self):
self.check_backward(chainerx.array(self.x))
class UnnamedVariableToStringTestBase(object):
def setUp(self):
if self.x_shape is None:
self.x = chainer.Variable()
else:
x = np.empty(self.x_shape)
x = np.arange(x.size).reshape(self.x_shape)
x = x.astype(self.dtype)
self.x = chainer.Variable(x)
def test_repr_cpu(self):
assert repr(self.x) == self.repr
def test_str_cpu(self):
assert str(self.x) == self.str
@attr.gpu
def test_repr_gpu(self):
self.x.to_gpu()
assert repr(self.x) == self.repr
@attr.gpu
def test_str_gpu(self):
self.x.to_gpu()
assert str(self.x) == self.str
def _skip_chainerx_unsupported_dtype(self):
supported_dtypes = chainerx.testing.dtypes.all_dtypes
if (self.dtype is not None
and self.dtype.__name__ not in supported_dtypes):
raise unittest.SkipTest(
'ChainerX does not support {} dtype'.format(
self.dtype.__name__))
@attr.chainerx
def test_repr_chainerx_cpu(self):
self._skip_chainerx_unsupported_dtype()
self.x.to_chx()
assert repr(self.x) == self.repr
@attr.chainerx
def test_str_chainerx_cpu(self):
self._skip_chainerx_unsupported_dtype()
self.x.to_chx()
assert str(self.x) == self.str
@attr.chainerx
@attr.gpu
def test_repr_chainerx_gpu(self):
self._skip_chainerx_unsupported_dtype()
self.x.to_gpu()
self.x.to_chx()
assert repr(self.x) == self.repr
@attr.chainerx
@attr.gpu
def test_str_chainerx_gpu(self):
self._skip_chainerx_unsupported_dtype()
self.x.to_gpu()
self.x.to_chx()
assert str(self.x) == self.str
@testing.parameterize(
{'x_shape': None, 'dtype': None, 'repr': 'variable(None)',
'str': 'variable(None)'},
{'x_shape': (2, 2,), 'dtype': np.float16,
'repr': 'variable([[ 0., 1.],\n [ 2., 3.]])',
'str': 'variable([[ 0. 1.]\n [ 2. 3.]])'},
{'x_shape': (2, 2,), 'dtype': np.float32,
'repr': 'variable([[ 0., 1.],\n [ 2., 3.]])',
'str': 'variable([[ 0. 1.]\n [ 2. 3.]])'},
{'x_shape': (2, 2,), 'dtype': np.float64,
'repr': 'variable([[ 0., 1.],\n [ 2., 3.]])',
'str': 'variable([[ 0. 1.]\n [ 2. 3.]])'},
{'x_shape': (3,), 'dtype': np.float32,
'repr': 'variable([ 0., 1., 2.])', 'str': 'variable([ 0. 1. 2.])'},
)
@testing.with_requires('numpy<1.14')
class TestUnnamedVariableToStringLegacy(
UnnamedVariableToStringTestBase, unittest.TestCase):
# Textual representation of arrays in NumPy 1.13 or earlier.
pass
@testing.parameterize(
{'x_shape': None, 'dtype': None, 'repr': 'variable(None)',
'str': 'variable(None)'},
{'x_shape': (2, 2,), 'dtype': np.float16,
'repr': 'variable([[0., 1.],\n [2., 3.]])',
'str': 'variable([[0. 1.]\n [2. 3.]])'},
{'x_shape': (2, 2,), 'dtype': np.float32,
'repr': 'variable([[0., 1.],\n [2., 3.]])',
'str': 'variable([[0. 1.]\n [2. 3.]])'},
{'x_shape': (2, 2,), 'dtype': np.float64,
'repr': 'variable([[0., 1.],\n [2., 3.]])',
'str': 'variable([[0. 1.]\n [2. 3.]])'},
{'x_shape': (3,), 'dtype': np.float32,
'repr': 'variable([0., 1., 2.])', 'str': 'variable([0. 1. 2.])'},
)
@testing.with_requires('numpy>=1.14')
class TestUnnamedVariableToStringModern(
UnnamedVariableToStringTestBase, unittest.TestCase):
# Textual representation of arrays in NumPy 1.14 or later.
pass
class TestUnnamedVariableDim2Size0ToString(unittest.TestCase):
def setUp(self):
x = np.empty((0, 0))
x = x.astype(np.float32)
self.x = chainer.Variable(x)
if (sys.version_info < (3,) and sys.maxsize > 2**32 and
platform.system() == 'Windows'):
self.repr = 'variable([], shape=(0L, 0L))'
else:
self.repr = 'variable([], shape=(0, 0))'
self.str = 'variable([])'
def test_repr_cpu(self):
assert repr(self.x) == self.repr
def test_str_cpu(self):
assert str(self.x) == self.str
@attr.gpu
def test_repr_gpu(self):
self.x.to_gpu()
assert repr(self.x) == self.repr
@attr.gpu
def test_str_gpu(self):
self.x.to_gpu()
assert str(self.x) == self.str
class NamedVariableToStringTestBase(object):
def setUp(self):
if self.x_shape is None:
self.x = chainer.Variable(name='x')
else:
x = np.empty(self.x_shape)
x = np.arange(x.size).reshape(self.x_shape)
x = x.astype(self.dtype)
self.x = chainer.Variable(x, name='x')
def test_named_repr(self):
assert repr(self.x) == self.repr
def test_named_str(self):
assert str(self.x) == self.str
@attr.gpu
def test_repr_gpu(self):
self.x.to_gpu()
assert repr(self.x) == self.repr
@attr.gpu
def test_str_gpu(self):
self.x.to_gpu()
assert str(self.x) == self.str
@testing.parameterize(
{'x_shape': None, 'dtype': None, 'repr': 'variable x(None)',
'str': 'variable x(None)'},
{'x_shape': (2, 2,), 'dtype': np.float32,
'repr': 'variable x([[ 0., 1.],\n [ 2., 3.]])',
'str': 'variable x([[ 0. 1.]\n [ 2. 3.]])'},
{'x_shape': (), 'dtype': np.float32,
'repr': 'variable x(0.0)', 'str': 'variable x(0.0)'},
)
@testing.with_requires('numpy<1.14')
class TestNamedVariableToStringLegacy(
NamedVariableToStringTestBase, unittest.TestCase):
# Textual representation of arrays in NumPy 1.13 or earlier.
pass
@testing.parameterize(
{'x_shape': None, 'dtype': None, 'repr': 'variable x(None)',
'str': 'variable x(None)'},
{'x_shape': (2, 2,), 'dtype': np.float32,
'repr': 'variable x([[0., 1.],\n [2., 3.]])',
'str': 'variable x([[0. 1.]\n [2. 3.]])'},
{'x_shape': (), 'dtype': np.float32,
'repr': 'variable x(0.)', 'str': 'variable x(0.)'},
)
@testing.with_requires('numpy>=1.14')
class TestNamedVariableToStringModern(
NamedVariableToStringTestBase, unittest.TestCase):
# Textual representation of arrays in NumPy 1.14 or later.
pass
class TestNamedVariableDim2Size0ToString(unittest.TestCase):
def setUp(self):
x = np.empty((0, 0))
x = x.astype(np.float32)
self.x = chainer.Variable(x, name='x')
if (sys.version_info < (3,) and sys.maxsize > 2**32 and
platform.system() == 'Windows'):
self.repr = 'variable x([], shape=(0L, 0L))'
else:
self.repr = 'variable x([], shape=(0, 0))'
self.str = 'variable x([])'
def test_named_repr(self):
assert repr(self.x) == self.repr
def test_named_str(self):
assert str(self.x) == self.str
@attr.gpu
def test_repr_gpu(self):
self.x.to_gpu()
assert repr(self.x) == self.repr
@attr.gpu
def test_str_gpu(self):
self.x.to_gpu()
assert str(self.x) == self.str
class IdentityFunction(chainer.Function):
def forward(self, inputs):
return inputs
def backward(self, inputs, grad_outputs):
return grad_outputs
class TestVariableDoubleBackward(unittest.TestCase):
def test_default_backward(self):
x = chainer.Variable(np.array(42, np.float32))
y = x * 2 # x.grad_var will be different from y.grad_var
y.backward(retain_grad=True)
assert x.grad_var is not y.grad_var
assert x.grad_var.creator is None
x.grad_var.backward()
assert y.grad_var.grad_var is None
def test_raise_double_backprop(self):
x = chainer.Variable(np.array(42, np.float32))
y = IdentityFunction()(x)
y.backward(enable_double_backprop=True)
with pytest.raises(RuntimeError):
x.grad_var.backward()
def test_raise_double_backprop_2(self):
x = chainer.Variable(np.array(42, np.float32))
z = F.identity(x) # new style
y = IdentityFunction()(z) # old style
y.backward(enable_double_backprop=True)
with pytest.raises(RuntimeError):
x.grad_var.backward()
def test_grad_raise_double_backprop(self):
x = chainer.Variable(np.array(42, np.float32))
y = IdentityFunction()(x)
y.backward(enable_double_backprop=True)
with pytest.raises(RuntimeError):
chainer.grad([x.grad_var], [y.grad_var])
def test_grad_raise_double_backprop_2(self):
x = chainer.Variable(np.array(42, np.float32))
z = F.identity(x) # new style
y = IdentityFunction()(z) # old style
y.backward(enable_double_backprop=True)
with pytest.raises(RuntimeError):
chainer.grad([x.grad_var], [y.grad_var])
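# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): with new-style
# functions, ``backward(enable_double_backprop=True)`` records the graph of
# the gradient computation, so ``x.grad_var`` can itself be backpropagated.
# Old-style ``chainer.Function`` subclasses such as ``IdentityFunction``
# above cannot, which is what the RuntimeError tests check.  The helper name
# is hypothetical; it assumes the module-level imports (``np``, ``chainer``).
def _sketch_double_backprop():
    x = chainer.Variable(np.array(3., np.float32))
    y = x * x                                # dy/dx = 2 * x
    y.backward(enable_double_backprop=True)
    gx = x.grad_var                          # 2 * x == 6, with a creator
    x.cleargrad()
    gx.backward()                            # differentiate 2 * x w.r.t. x
    np.testing.assert_allclose(x.grad, 2.)   # d(2x)/dx == 2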
class TestVariableDoubleBackwardOneElementScalar(unittest.TestCase):
# Tests for old-styled (1-element array) scalar.
# See: https://github.com/chainer/chainer/pull/4199
def test_default_backward(self):
x = chainer.Variable(np.array([42], np.float32))
y = x * 2 # x.grad_var will be different from y.grad_var
with testing.assert_warns(DeprecationWarning):
y.backward(retain_grad=True)
assert x.grad_var.creator is None
with warnings.catch_warnings():
# ok to be warned that x.grad_var is old-styled scalar
warnings.simplefilter('ignore', DeprecationWarning)
x.grad_var.backward()
assert y.grad_var.grad_var is None
def test_raise_double_backprop(self):
x = chainer.Variable(np.array([42], np.float32))
y = IdentityFunction()(x)
with testing.assert_warns(DeprecationWarning):
y.backward(enable_double_backprop=True)
with pytest.raises(RuntimeError):
with warnings.catch_warnings():
# ok to be warned that x.grad_var is old-styled scalar
warnings.simplefilter('ignore', DeprecationWarning)
x.grad_var.backward()
def test_raise_double_backprop_2(self):
x = chainer.Variable(np.array([42], np.float32))
z = F.identity(x) # new style
y = IdentityFunction()(z) # old style
with testing.assert_warns(DeprecationWarning):
y.backward(enable_double_backprop=True)
with pytest.raises(RuntimeError):
with warnings.catch_warnings():
# ok to be warned that x.grad_var is old-styled scalar
warnings.simplefilter('ignore', DeprecationWarning)
x.grad_var.backward()
def test_grad_raise_double_backprop(self):
x = chainer.Variable(np.array([42], np.float32))
y = IdentityFunction()(x)
with testing.assert_warns(DeprecationWarning):
y.backward(enable_double_backprop=True)
with pytest.raises(RuntimeError):
chainer.grad([x.grad_var], [y.grad_var])
def test_grad_raise_double_backprop_2(self):
x = chainer.Variable(np.array([42], np.float32))
z = F.identity(x) # new style
y = IdentityFunction()(z) # old style
with testing.assert_warns(DeprecationWarning):
y.backward(enable_double_backprop=True)
with pytest.raises(RuntimeError):
chainer.grad([x.grad_var], [y.grad_var])
@testing.backend.inject_backend_tests(None, _backend_params)
class TestAsVariable(unittest.TestCase):
def test_to_variable_from_array(self, backend_config):
x = backend_config.get_array(np.random.randn(1).astype(np.float32))
y = chainer.as_variable(x)
assert isinstance(y, chainer.Variable)
assert y.requires_grad is False
if backend_config.xp is chainerx:
# chainerx
assert y.array.shape == x.shape
assert y.array.device == x.device
assert y.array.strides == x.strides
assert not y.array.is_backprop_required()
chainerx.testing.assert_array_equal(y.array, x)
else:
# non-chainerx
assert y.array is x
def check_to_variable_from_variable(self, backend_config, requires_grad):
x_arr = backend_config.get_array(np.random.randn(1).astype(np.float32))
x = chainer.Variable(x_arr, requires_grad=requires_grad)
y = chainer.as_variable(x)
assert y is x
assert y.requires_grad is requires_grad
def test_to_variable_from_variable(self, backend_config):
self.check_to_variable_from_variable(backend_config, True)
self.check_to_variable_from_variable(backend_config, False)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestAsArray(unittest.TestCase):
def test_to_array_from_array(self, backend_config):
x = backend_config.get_array(np.random.randn(1).astype(np.float32))
y = chainer.as_array(x)
assert y is x
def check_to_array_from_variable(self, backend_config, requires_grad):
x_arr = backend_config.get_array(np.random.randn(1).astype(np.float32))
x = chainer.Variable(x_arr, requires_grad=requires_grad)
y = chainer.as_array(x)
assert y is x.array
def test_to_array_from_variable(self, backend_config):
self.check_to_array_from_variable(backend_config, True)
self.check_to_array_from_variable(backend_config, False)
@testing.parameterize(*testing.product({
'in_shape': [(4, 3, 2)],
'dtype': [np.float16, np.float32, np.float64],
'loss_scale': [None, 1, 10],
}))
class TestLossScale(unittest.TestCase):
def setUp(self):
self.x = np.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
self.y = np.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
def check_loss_scale(self, xp, x_data, y_data):
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
z = x * y
loss = F.sum(z)
loss.backward(loss_scale=self.loss_scale)
        # ChainerX scales the gradients back inside its backward method
if xp is not chainerx and self.loss_scale is not None:
x.grad /= self.loss_scale
y.grad /= self.loss_scale
rtol, atol = 1e-4, 1e-5
if self.dtype is np.float16:
rtol, atol = 1e-1, 1e-2
testing.assert_allclose(x.data, y.grad, rtol=rtol, atol=atol)
testing.assert_allclose(y.data, x.grad, rtol=rtol, atol=atol)
def test_loss_scale_cpu(self):
self.check_loss_scale(np, self.x, self.y)
@attr.gpu
def test_loss_scale_gpu(self):
self.check_loss_scale(cuda, cuda.to_gpu(self.x), cuda.to_gpu(self.y))
@attr.chainerx
def test_loss_scale_chainerx_cpu(self):
x = chainerx.array(self.x, device='native:0')
y = chainerx.array(self.y, device='native:0')
self.check_loss_scale(chainerx, x, y)
@attr.gpu
@attr.chainerx
def test_loss_scale_chainerx_gpu(self):
x = chainerx.array(self.x, device='cuda:0')
y = chainerx.array(self.y, device='cuda:0')
self.check_loss_scale(chainerx, x, y)
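# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): ``loss_scale``
# multiplies the starting gradient by the given factor before backprop (a
# common trick for float16 training); on NumPy/CuPy the caller divides the
# accumulated gradients back, exactly as ``check_loss_scale`` above does.
# The helper name is hypothetical; it assumes the module-level imports
# (``np``, ``chainer``, ``F``).
def _sketch_loss_scale(scale=8.):
    x = chainer.Variable(np.array([1., 2., 3.], np.float32))
    loss = F.sum(x * x)                      # d(loss)/dx = 2 * x
    loss.backward(loss_scale=scale)
    np.testing.assert_allclose(x.grad, scale * 2. * x.data)
    x.grad /= scale                          # divide back to the true grad
    np.testing.assert_allclose(x.grad, 2. * x.data)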
@testing.parameterize(*testing.product({
    # ideep 2.0.0 does not support zero-sized shapes
'shape': [(1,), (3, 2), (2, 3, 4, 3)],
'dtype': [
np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32,
np.uint64, np.float16, np.float32, np.float64],
}))
@attr.ideep
class TestIntel64(unittest.TestCase):
def setUp(self):
self.x_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
def _check_variable_shape_and_dtype(self, var):
assert var.data.shape == self.shape
assert var.data.dtype == self.dtype
assert var.shape == self.shape
assert var.dtype == self.dtype
def test_cpu_to_intel64(self):
x = chainer.Variable(self.x_data)
assert x.xp is np
assert x._has_chainerx_array is False
prev_x_data = x.data
x.to_intel64()
assert x.xp is np
assert x._has_chainerx_array is False
# Converted to mdarray only if dtype == float32.
# Otherwise, data should be left untouched.
if self.dtype == np.float32:
assert isinstance(x.data, intel64.ideep.mdarray)
else:
assert x.data is prev_x_data
self._check_variable_shape_and_dtype(x)
def test_intel64_to_intel64(self):
x = chainer.Variable(self.x_data)
x.to_intel64()
prev_x_data = x.data
x.to_intel64()
# Data should be left untouched
assert x.data is prev_x_data
@attr.gpu
def test_gpu_to_intel64(self):
x = chainer.Variable(self.x_data)
x.to_gpu()
x.to_intel64()
# Converted to mdarray only if dtype == float32.
# Otherwise, data should be converted to numpy.ndarray.
if self.dtype == np.float32:
assert isinstance(x.data, intel64.ideep.mdarray)
else:
assert isinstance(x.data, np.ndarray)
self._check_variable_shape_and_dtype(x)
@attr.gpu
def test_intel64_to_gpu(self):
x = chainer.Variable(self.x_data)
x.to_intel64()
x.to_gpu()
# Data should be converted to cuda.ndarray
assert isinstance(x.data, cuda.cupy.ndarray)
self._check_variable_shape_and_dtype(x)
def test_intel64_to_cpu(self):
x = chainer.Variable(self.x_data)
x.to_intel64()
x.to_cpu()
# Data should be converted to numpy.ndarray
assert isinstance(x.data, np.ndarray)
self._check_variable_shape_and_dtype(x)
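# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): ``to_intel64``
# converts the data to an iDeep ``mdarray`` only when iDeep supports it
# (float32 with a supported shape); anything else stays a plain
# ``numpy.ndarray``, as the tests above verify.  The helper name and the
# availability guard are for illustration only; it assumes the module-level
# imports (``np``, ``chainer``, ``intel64``).
def _sketch_to_intel64():
    if not intel64.is_ideep_available():
        return                               # nothing to demonstrate
    v32 = chainer.Variable(np.ones((3, 2), np.float32))
    v64 = chainer.Variable(np.ones((3, 2), np.float64))
    v32.to_intel64()
    v64.to_intel64()
    assert isinstance(v32.data, intel64.ideep.mdarray)   # converted
    assert isinstance(v64.data, np.ndarray)               # left as numpy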
@testing.parameterize(*testing.product({
'shape': [(), (3, 2, 3), (4, 4, 3, 2, 3)],
'dtype': [
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.float16, np.float32, np.float64,
],
}))
@attr.ideep
class TestIntel64Unsupported(unittest.TestCase):
"""Tests for arrays that should not be converted to iDeep array."""
def setUp(self):
self.x_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
def test_cpu_to_intel64(self):
x = chainer.Variable(self.x_data)
x.to_intel64()
assert isinstance(x.data, np.ndarray)
@attr.gpu
def test_gpu_to_intel64(self):
x = chainer.Variable(self.x_data)
x.to_gpu()
x.to_intel64()
assert isinstance(x.data, np.ndarray)
@testing.parameterize(*testing.product({
'shape': [(3,), (3, 2), (3, 2, 2), (3, 2, 2, 3)],
'dtype': [
np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32,
np.uint64, np.float16, np.float32, np.float64],
}))
class TestLazyGradSum(unittest.TestCase):
def setUp(self):
self.x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
y10 = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
gy00 = chainer.Variable(
np.random.uniform(-1, 1, self.shape).astype(self.dtype))
f10 = chainer.FunctionNode()
f10.check_type_forward = mock.MagicMock()
f10.forward_cpu = mock.MagicMock(return_value=(y10,))
f10.retain_outputs((0,))
f10.backward = mock.MagicMock(return_value=(gy00,))
self.y10 = y10
self.f10 = f10
self.gy00 = gy00
y11 = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
gy01 = chainer.Variable(
np.random.uniform(-1, 1, self.shape).astype(self.dtype))
f11 = chainer.FunctionNode()
f11.check_type_forward = mock.MagicMock()
f11.forward_cpu = mock.MagicMock(return_value=(y11,))
f11.retain_outputs((0,))
f11.backward = mock.MagicMock(return_value=(gy01,))
self.y11 = y11
self.f11 = f11
self.gy01 = gy01
y12 = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
gy02 = chainer.Variable(
np.random.uniform(-1, 1, self.shape).astype(self.dtype))
f12 = chainer.FunctionNode()
f12.check_type_forward = mock.MagicMock()
f12.forward_cpu = mock.MagicMock(return_value=(y12,))
f12.retain_outputs((0,))
f12.backward = mock.MagicMock(return_value=(gy02,))
self.y12 = y12
self.f12 = f12
self.gy02 = gy02
y = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
gy10 = chainer.Variable(
np.random.uniform(-1, 1, self.shape).astype(self.dtype))
gy11 = chainer.Variable(
np.random.uniform(-1, 1, self.shape).astype(self.dtype))
gy12 = chainer.Variable(
np.random.uniform(-1, 1, self.shape).astype(self.dtype))
f2 = chainer.FunctionNode()
f2.check_type_forward = mock.MagicMock()
f2.forward_cpu = mock.MagicMock(return_value=(y,))
        f2.retain_outputs((0,))
f2.backward = mock.MagicMock(return_value=(gy10, gy11, gy12))
self.y = y
self.f2 = f2
self.gy10 = gy10
self.gy11 = gy11
self.gy12 = gy12
self.gx = gy00 + gy01 + gy02
def forward(self, x):
y0 = F.identity(x)
y10 = self.f10.apply((y0,))
y11 = self.f11.apply((y0,))
y12 = self.f12.apply((y0,))
y = self.f2.apply((y10[0], y11[0], y12[0]))
return y
def check_backward(self):
x = chainer.Variable(self.x)
y = self.forward(x)
y[0].grad = np.ones(y[0].shape, y[0].dtype)
y[0].backward()
testing.assert_allclose(self.gx.data, x.grad, atol=1e-3, rtol=1e-2)
def test_backward_cpu(self):
with chainer.using_config('lazy_grad_sum', False):
self.check_backward()
def test_backward_cpu_lazy_grad_sum(self):
with chainer.using_config('lazy_grad_sum', True):
self.check_backward()
@testing.parameterize(*(
testing.product({
'from_connected': [True, False],
'calculate_by_variable': [True, False],
'backward_by_variable': [True, False],
})))
@attr.chainerx
class TestVariableChainerxArrayViewBackprop(unittest.TestCase):
def test_chx_array_view(self):
from_connected = self.from_connected
calculate_by_variable = self.calculate_by_variable
backward_by_variable = self.backward_by_variable
# Create an original array, either connected or disconnected.
a = chainerx.array([1, 2], np.float32)
if from_connected:
a.require_grad()
# Wrap with a variable
x = chainer.Variable(a, requires_grad=True)
x_arr = x.chx_array # Unwrap a view
assert x_arr.is_backprop_required()
assert not x_arr.is_grad_required()
assert a is not x_arr # x_arr is a view of a
if calculate_by_variable:
# Calculate by variable
y = F.square(x_arr)
# Unwrap the output array
y_arr = y.chx_array
y_arr.grad = chainerx.ones_like(y.array)
else:
# Calculate by array
y_arr = chainerx.square(x_arr)
y_arr.grad = chainerx.ones_like(y_arr)
# Wrap y with variable
y = chainer.Variable(y_arr, requires_grad=True)
# Backward
if backward_by_variable:
y.backward()
else:
y_arr.backward()
# x.grad is set
assert x.grad is not None
chainerx.testing.assert_array_equal_ex(
chainerx.array([2, 4], np.float32), x.grad)
@attr.chainerx
class TestVariableChainerxArrayView(unittest.TestCase):
def test_unwrap_disconnected(self):
a = chainerx.array([1, 2], np.float32)
# Wrap with a variable
x = chainer.Variable(a, requires_grad=False)
x_arr = x.chx_array # Unwrap a view
assert not x_arr.is_backprop_required()
x_arr.require_grad()
assert x_arr.is_backprop_required()
x_arr2 = x.chx_array # Unwrap another view
# require_grad does not affect distinct views.
assert not x_arr2.is_backprop_required()
# Nor does it affect the original array.
assert not a.is_backprop_required()
def test_unwrap_non_chainerx(self):
a = np.array([1, 2], np.float32)
x = chainer.Variable(a, requires_grad=True)
with pytest.raises(ValueError):
x.chx_array
testing.run_module(__name__, __file__)
| 103,385
| 32.264479
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_optimizer.py
|
import copy
import unittest
import warnings
import mock
import numpy as np
import pytest
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import optimizer
from chainer import optimizers
from chainer import serializer
from chainer import testing
from chainer.testing import attr
import chainerx
if chainerx.is_available():
import chainerx.testing
_backend_params = [
# NumPy
{},
{'use_ideep': 'always'},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
class TestHyperparameter(unittest.TestCase):
def setUp(self):
self.parent = optimizer.Hyperparameter()
self.parent.x = 1
self.parent.y = 2
self.child = optimizer.Hyperparameter(self.parent)
self.child.y = 3
self.child.z = 4
def test_getattr(self):
self.assertTrue(hasattr(self.parent, 'x'))
self.assertEqual(self.parent.x, 1)
self.assertTrue(hasattr(self.parent, 'y'))
self.assertEqual(self.parent.y, 2)
self.assertFalse(hasattr(self.parent, 'z'))
self.assertTrue(hasattr(self.child, 'x'))
self.assertEqual(self.child.x, 1)
self.assertTrue(hasattr(self.child, 'y'))
self.assertEqual(self.child.y, 3)
self.assertTrue(hasattr(self.child, 'z'))
self.assertEqual(self.child.z, 4)
def test_get_dict(self):
self.assertEqual(self.parent.get_dict(), {'x': 1, 'y': 2})
self.assertEqual(self.child.get_dict(), {'x': 1, 'y': 3, 'z': 4})
def test_repr(self):
self.assertEqual(repr(self.parent), 'Hyperparameter(x=1, y=2)')
self.assertEqual(repr(self.child), 'Hyperparameter(x=1, y=3, z=4)')
def test_deep_copy(self):
parent_copy, child_copy = copy.deepcopy([self.parent, self.child])
self.assertEqual(self.child.get_dict(), child_copy.get_dict())
self.assertEqual(self.parent.get_dict(), parent_copy.get_dict())
self.assertIs(child_copy.parent, parent_copy)
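# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): a child
# ``Hyperparameter`` falls back to its parent for attributes it does not
# define and overrides the ones it does, which is what ``TestHyperparameter``
# above exercises.  The helper name is hypothetical; it assumes the
# module-level imports (``optimizer``).
def _sketch_hyperparameter_chain():
    parent = optimizer.Hyperparameter()
    parent.lr = 0.01
    parent.momentum = 0.9
    child = optimizer.Hyperparameter(parent)
    child.lr = 0.1                        # overrides the parent's value
    assert child.lr == 0.1
    assert child.momentum == 0.9          # inherited from the parent
    assert child.get_dict() == {'lr': 0.1, 'momentum': 0.9}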
class DummyDeserializer(serializer.Deserializer):
def __init__(self, target):
super(DummyDeserializer, self).__init__()
self.target = target
def __getitem__(self, key):
raise NotImplementedError
def __call__(self, key, value):
if value is None:
value = self.target[key]
elif isinstance(value, np.ndarray):
np.copyto(value, self.target[key])
else:
value = type(value)(np.asarray(self.target[key]))
return value
def _create_update_rule(has_states):
class SimpleUpdateRule(optimizer.UpdateRule):
def update_core_cpu(self, param):
pass
def update_core_gpu(self, param):
pass
def _init_state(data):
state = update_rule.state
state['a'] = 0
state['b'] = np.array([1, 2, 3], dtype=np.float32)
update_rule = SimpleUpdateRule()
update_rule.update_core_cpu = mock.MagicMock(
wraps=update_rule.update_core_cpu)
update_rule.update_core_gpu = mock.MagicMock(
wraps=update_rule.update_core_gpu)
update_rule.update_core_chainerx = mock.MagicMock(
wraps=update_rule.update_core_chainerx)
if has_states:
update_rule.init_state = _init_state
return update_rule
def _create_var():
data = np.ones((2, 3), np.float32)
grad = np.ones_like(data)
var = chainer.Variable(data, grad=grad)
return var
@testing.backend.inject_backend_tests(
[
'test_update',
'test_add_hook',
'test_add_hook_with_name',
'test_add_hook_with_function_name',
],
_backend_params)
class TestUpdateRule(unittest.TestCase):
def setUp(self):
self.update_rule = _create_update_rule(has_states=False)
self.var = _create_var()
def check_update(self, backend_config):
var = self.var
var.to_device(backend_config.device)
update_rule = self.update_rule
update_rule.update(var)
xp = backend_config.xp
# First check update_core_chainerx.
# If xp is chainerx, fallback xp is assigned to it for the second
# check.
if xp is chainerx:
self.assertEqual(
self.update_rule.update_core_chainerx.call_count, 1)
xp = backend_config.device.fallback_device.xp
else:
self.assertEqual(
self.update_rule.update_core_chainerx.call_count, 0)
# Secondly check update_core_cpu and _gpu.
if xp is np:
self.assertEqual(update_rule.update_core_cpu.call_count, 1)
self.assertEqual(update_rule.update_core_gpu.call_count, 0)
elif xp is cuda.cupy:
self.assertEqual(self.update_rule.update_core_cpu.call_count, 0)
self.assertEqual(self.update_rule.update_core_gpu.call_count, 1)
def test_update(self, backend_config):
self.check_update(backend_config)
def test_add_hook(self, backend_config):
hook = mock.MagicMock()
self.update_rule.add_hook(hook)
self.check_update(backend_config)
self.assertEqual(hook.call_count, 1)
args = hook.call_args_list[0][0]
self.assertEqual(len(args), 2)
self.assertIs(args[0], self.update_rule)
self.assertIs(args[1], self.var)
def test_add_hook_with_name(self, backend_config):
hook = mock.MagicMock()
self.update_rule.add_hook(hook, name='hook')
self.check_update(backend_config)
self.assertEqual(hook.call_count, 1)
args = hook.call_args_list[0][0]
self.assertEqual(len(args), 2)
self.assertIs(args[0], self.update_rule)
self.assertIs(args[1], self.var)
def test_remove_hook(self, backend_config):
hook = mock.MagicMock()
self.update_rule.add_hook(hook, name='hook')
self.update_rule.remove_hook('hook')
self.check_update(backend_config)
self.assertEqual(hook.call_count, 0)
def test_add_hook_with_function_name(self, backend_config):
hook_body = mock.MagicMock()
def foo(update_rule, data, grad):
hook_body(update_rule, data, grad)
self.update_rule.add_hook(foo)
self.update_rule.remove_hook('foo')
self.check_update(backend_config)
self.assertEqual(hook_body.call_count, 0)
def test_add_hook_no_name(self):
class CallableWithoutName(object):
def __call__(self, update_rule, param):
pass
with self.assertRaises(ValueError):
self.update_rule.add_hook(CallableWithoutName())
def test_add_hook_duplicated_name(self):
self.update_rule.add_hook(mock.MagicMock(), name='foo')
with self.assertRaises(KeyError):
self.update_rule.add_hook(mock.MagicMock(), name='foo')
def test_remove_hook_not_exist(self):
with self.assertRaises(KeyError):
self.update_rule.remove_hook('foo')
def test_disabled_update_rule(self):
self.update_rule.update_core = mock.MagicMock()
self.update_rule.enabled = False
self.update_rule.update(self.var)
self.assertEqual(self.update_rule.update_core.call_count, 0)
self.update_rule.enabled = True
self.update_rule.update(self.var)
self.assertEqual(self.update_rule.update_core.call_count, 1)
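# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): a minimal
# custom ``UpdateRule`` doing plain SGD in ``update_core_cpu``, plus a hook
# registered with ``add_hook`` as in ``TestUpdateRule`` above (hooks receive
# ``(update_rule, param)``).  The helper and class names are hypothetical;
# it assumes the module-level imports (``np``, ``chainer``, ``optimizer``).
def _sketch_custom_update_rule():
    class SGDRule(optimizer.UpdateRule):
        def __init__(self, lr):
            super(SGDRule, self).__init__()
            self.lr = lr

        def update_core_cpu(self, param):
            if param.grad is not None:
                param.data -= self.lr * param.grad

    seen = []

    def record_hook(update_rule, param):
        seen.append(param)

    rule = SGDRule(lr=0.5)
    rule.add_hook(record_hook)
    param = chainer.Variable(np.ones(3, np.float32),
                             grad=np.ones(3, np.float32))
    rule.update(param)
    assert seen[0] is param                      # the hook saw the param
    np.testing.assert_allclose(param.data, 0.5)  # 1 - 0.5 * 1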
@testing.backend.inject_backend_tests(None, _backend_params)
class TestOptimizerSerialize(unittest.TestCase):
def setUp(self):
self.update_rule = _create_update_rule(has_states=True)
def get_target(self, backend_config):
target = {}
target['t'] = 100
target['a'] = 1
target['b'] = (
backend_config.get_array(np.array([2, 3, 4], dtype=np.float32)))
return target
def test_deserialize(self, backend_config):
target = self.get_target(backend_config)
self.update_rule.serialize(DummyDeserializer(target))
self.assertEqual(self.update_rule.t, target['t'])
self.assertIsNotNone(self.update_rule.state)
self.assertEqual(self.update_rule.state['a'], target['a'])
backend_config.xp.testing.assert_array_equal(
self.update_rule.state['b'], target['b'])
def test_deserialize_by_strict_deserializer(self, backend_config):
target = self.get_target(backend_config)
del target['a']
with self.assertRaises(KeyError):
self.update_rule.serialize(DummyDeserializer(target))
def test_deserialize_by_nonstrict_deserializer(self, backend_config):
target = self.get_target(backend_config)
target['a'] = None
self.update_rule.serialize(DummyDeserializer(target))
self.assertEqual(self.update_rule.t, target['t'])
self.assertIsNone(self.update_rule.state)
def test_deserialize_disabled_update_rule_by_strict_deserializer(
self, backend_config):
self.update_rule.enabled = False
target = self.get_target(backend_config)
del target['a']
self.update_rule.serialize(DummyDeserializer(target))
self.assertEqual(self.update_rule.t, target['t'])
self.assertIsNone(self.update_rule.state)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestUpdateRuleCopyState(unittest.TestCase):
def setUp(self):
self.update_rule = _create_update_rule(has_states=True)
def test_state_copy(self, backend_config, _):
def update_core(param):
self.assertIsInstance(self.update_rule.state['a'], int)
self.assertTrue(
backend_config.device.is_array_supported(
self.update_rule.state['b']))
self.update_rule.update_core = update_core
var = _create_var()
var.to_device(backend_config.device)
self.update_rule.update(var)
def test_state_copy_to_another_device(
self, backend_config1, backend_config2):
def update_core(param):
self.assertIsInstance(self.update_rule.state['a'], int)
self.assertTrue(
backend_config2.device.is_array_supported(
self.update_rule.state['b']))
var1 = _create_var()
var1.to_device(backend_config1.device)
        # call update with arrays on the first device
        # (tested by another method)
self.update_rule.update_core = lambda param: None
self.update_rule.update(var1)
# check if it copies the states correctly when arrays on another device
# are passed
self.update_rule.update_core = update_core
var2 = _create_var()
var2.to_device(backend_config2.device)
self.update_rule.update(var2)
class TestOptimizer(unittest.TestCase):
def setUp(self):
self.optimizer = optimizer.Optimizer()
def test_new_epoch(self):
self.optimizer.new_epoch()
self.assertEqual(1, self.optimizer.epoch)
def test_invalid_new_epoch(self):
self.optimizer.use_auto_new_epoch = True
with self.assertRaises(RuntimeError):
self.optimizer.new_epoch()
def test_auto_new_epoch(self):
self.optimizer.use_auto_new_epoch = True
self.optimizer.new_epoch(auto=True)
self.assertEqual(1, self.optimizer.epoch)
def test_invalid_auto_new_epoch(self):
with self.assertRaises(RuntimeError):
self.optimizer.new_epoch(auto=True)
@attr.chainerx
class TestOptimizerWithChainerxImplementation(unittest.TestCase):
    # This test ensures an optimizer can update a ChainerX array by
    # overriding update_core_chainerx().
    def test_update(self):
initial_p = np.array([1., 2., 3.], np.float32)
x = chainerx.array([2., 4., 6.], np.float32)
expected_p = 4. * initial_p - 6. * backend.CpuDevice().send(x)
class ChainerxUpdateRule(optimizer.UpdateRule):
call_count = 0
def update_core_chainerx(self, param):
                # p += 3 * p - 2 * (dy/dp)
array = param.array
t1 = param.array.as_grad_stopped() * 3.
t2 = param.grad.as_grad_stopped() * 2.
delta = t1 - t2
array += delta
self.call_count += 1
class ChainerxOptimizer(optimizer.GradientMethod):
def create_update_rule(self):
return ChainerxUpdateRule(self.hyperparam)
class Link(chainer.Link):
def __init__(self):
super(Link, self).__init__()
with self.init_scope():
self.p = chainer.Parameter(initial_p)
def forward(self, x):
return 3. * x * self.p
link = Link()
link.to_device('native:0')
y = link(x)
y.backward()
optimizer_ = ChainerxOptimizer()
optimizer_.setup(link)
optimizer_.update()
assert link.p.update_rule.call_count == 1
np.testing.assert_array_equal(
backend.CpuDevice().send(link.p.array), expected_p)
class TestOptimizerHook(unittest.TestCase):
def setUp(self):
self.optimizer = optimizer.Optimizer()
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
def test_add_hook(self):
h1 = mock.MagicMock(timing='pre')
h1.call_for_each_param = False
self.optimizer.setup(self.target)
self.optimizer.add_hook(h1, 'h1')
self.optimizer.call_hooks()
h1.assert_called_with(self.optimizer)
def test_add_hook_call_for_each_param(self):
h1 = mock.MagicMock(timing='pre')
h1.call_for_each_param = True
self.optimizer.setup(self.target)
self.optimizer.add_hook(h1, 'h1')
self.optimizer.call_hooks()
h1.assert_called_with(self.target.param.update_rule, self.target.param)
def test_remove_hook(self):
h1 = mock.MagicMock(timing='pre')
self.optimizer.setup(self.target)
self.optimizer.add_hook(h1, 'h1')
self.optimizer.remove_hook('h1')
self.optimizer.call_hooks()
self.assertFalse(h1.called)
def test_duplicated_hook(self):
self.optimizer.setup(self.target)
self.optimizer.add_hook(lambda s: None, 'h1', timing='pre')
with self.assertRaises(KeyError):
self.optimizer.add_hook(lambda s: None, 'h1', timing='pre')
def test_invalid_hook(self):
self.optimizer.setup(self.target)
with self.assertRaises(TypeError):
self.optimizer.add_hook(1)
def test_add_hook_before_setup(self):
with self.assertRaises(RuntimeError):
self.optimizer.add_hook(lambda s: None, 'h1')
class SimpleLink(chainer.Link):
def __init__(self, w, g):
super(SimpleLink, self).__init__()
with self.init_scope():
self.param = chainer.Parameter(w)
self.param.grad = g
@testing.backend.inject_backend_tests(['test_update'], _backend_params)
class TestGradientMethod(unittest.TestCase):
def setUp(self):
self.optimizer = chainer.GradientMethod()
self.target = chainer.ChainList(
SimpleLink(np.arange(3).astype(np.float32),
np.arange(3).astype(np.float32)),
SimpleLink(np.arange(3).astype(np.float32),
np.arange(3).astype(np.float32)))
self.optimizer.create_update_rule = mock.MagicMock
def test_setup(self):
create_update_rule = mock.MagicMock()
target = self.target
optimizer = self.optimizer
optimizer.create_update_rule = create_update_rule
optimizer.setup(target)
self.assertEqual(create_update_rule.call_count, 2)
self.assertEqual(create_update_rule.call_args_list[0], [(), {}])
self.assertEqual(create_update_rule.call_args_list[1], [(), {}])
def test_update(self, backend_config):
target = self.target
optimizer = self.optimizer
target.to_device(backend_config.device)
optimizer.setup(target)
self.assertEqual(optimizer.t, 0)
optimizer.update()
self.assertEqual(optimizer.t, 1)
param1 = target[0].param
param2 = target[1].param
param1.update_rule.update.assert_called_once_with(param1)
param2.update_rule.update.assert_called_once_with(param2)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product({
'override_pattern': [
'generic', # only update_core() is overridden
'cpu_gpu', # update_core_{cpu,gpu} are overridden
'cpu_gpu_chx', # update_core_{cpu,gpu,chainerx} are overridden
],
}))
class TestGradientMethodUpdate(unittest.TestCase):
"""Ensures UpdateRule's appropriate methods are called, for various
override patterns and parameters with various conditions."""
def create(self, device):
class MyLink(chainer.Link):
def __init__(self):
super(MyLink, self).__init__()
with self.init_scope():
self.p1 = chainer.Parameter() # uninitialized
self.p2 = chainer.Parameter( # initialized, with grad
np.array([3, 2], np.float32))
self.p2.grad = np.array([13, 12], np.float32)
self.p3 = chainer.Parameter( # initialized, without grad
np.array([5, 7], np.float32))
call_record = []
override_pattern = self.override_pattern
class MyUpdateRule(optimizer.UpdateRule):
if override_pattern == 'generic':
def update_core(self, param):
call_record.append(('update_core', param))
elif override_pattern == 'cpu_gpu':
def update_core_cpu(self, param):
call_record.append(('update_core_cpu', param))
def update_core_gpu(self, param):
call_record.append(('update_core_gpu', param))
elif override_pattern == 'cpu_gpu_chx':
def update_core_cpu(self, param):
call_record.append(('update_core_cpu', param))
def update_core_gpu(self, param):
call_record.append(('update_core_gpu', param))
def update_core_chainerx(self, param):
call_record.append(('update_core_chainerx', param))
else:
assert False, override_pattern
class MyOptimizer(optimizer.GradientMethod):
def create_update_rule(self):
return MyUpdateRule()
optimizer_ = MyOptimizer()
target = MyLink()
target.to_device(device)
optimizer_.setup(target)
return optimizer_, call_record
def test_update(self, backend_config):
device = backend_config.device
override_pattern = self.override_pattern
optimizer, call_record = self.create(device)
optimizer.update()
self.assertEqual(len(call_record), 3)
        # Determine the expected method name that was called.
if override_pattern == 'generic':
method_name = 'update_core'
elif override_pattern == 'cpu_gpu':
if isinstance(device, backend.ChainerxDevice):
xp = device.fallback_device.xp
else:
xp = device.xp
if xp is np:
method_name = 'update_core_cpu'
else:
assert xp is cuda.cupy
method_name = 'update_core_gpu'
elif override_pattern == 'cpu_gpu_chx':
if isinstance(device, backend.ChainerxDevice):
method_name = 'update_core_chainerx'
elif device.xp is np:
method_name = 'update_core_cpu'
else:
assert device.xp is cuda.cupy
method_name = 'update_core_gpu'
else:
assert False, override_pattern
# Check call record.
# TODO(niboshi): Check the param argument as well.
self.assertEqual(call_record[0][0], method_name)
self.assertEqual(call_record[1][0], method_name)
self.assertEqual(call_record[2][0], method_name)
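# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the dispatch
# rules exercised by ``TestGradientMethodUpdate`` above.  If only
# ``update_core`` is overridden it is used for every device; otherwise
# ``update_core_cpu`` / ``update_core_gpu`` handle NumPy / CuPy parameters,
# and ``update_core_chainerx`` (when overridden) handles ChainerX ones.
# The sketch below runs the generic path on NumPy.  The helper and class
# names are hypothetical; it assumes ``SimpleLink`` defined above and the
# module-level imports (``np``, ``optimizer``).
def _sketch_update_dispatch():
    record = []

    class MyRule(optimizer.UpdateRule):
        def update_core(self, param):        # generic: any device
            record.append('update_core')

    class MyOptimizer(optimizer.GradientMethod):
        def create_update_rule(self):
            return MyRule()

    link = SimpleLink(np.zeros(3, np.float32), np.zeros(3, np.float32))
    opt = MyOptimizer()
    opt.setup(link)
    opt.update()
    assert record == ['update_core']         # one parameter, one call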
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product({
'shape': [(4, 3, 2)],
'dtype': [np.float16, np.float32, np.float64],
'loss_scale': [None, 1, 10],
}))
class TestGradientMethodLossScale(unittest.TestCase):
def setUp(self):
param0_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
param0_grad = np.copy(param0_data)
param1_data = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
param1_grad = np.copy(param1_data)
self.target = chainer.ChainList(
SimpleLink(param0_data, param0_grad),
SimpleLink(param1_data, param1_grad))
lr = 1.0
if self.loss_scale is not None:
lr = self.loss_scale
for i in range(2):
self.target[i].param._loss_scale = self.loss_scale
# TODO(niboshi): Do not use SGD in GradientMethod test
self.optimizer = chainer.optimizers.SGD(lr)
def test_update(self, backend_config):
if backend_config.xp is chainerx:
            # ChainerX performs the loss scaling in its own backward
            # method, so the optimizer should not divide the parameters
            # back.  This test does not actually build a ChainerX
            # computation graph, so no loss scaling actually happens here.
self.optimizer.lr = 1.0
target = self.target
optimizer = self.optimizer
target.to_device(backend_config.device)
optimizer.setup(target)
optimizer.update()
xp = backend.get_array_module(target[0].param)
expected_data = xp.zeros(self.shape, dtype=self.dtype)
rtol, atol = 1e-4, 1e-5
if self.dtype is np.float16:
rtol, atol = 1e-1, 1e-2
for i in range(2):
testing.assert_allclose(
target[i].param.data, expected_data,
rtol=rtol, atol=atol)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestCleargradHook(unittest.TestCase):
def setUp(self):
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape(2, 3),
np.arange(3, -3, -1, dtype=np.float32).reshape(2, 3))
def test_cleargrad(self, backend_config):
class CleargradHook(object):
name = 'Cleargrad'
timing = 'pre'
def __init__(self, _):
pass
def __call__(self, opt):
for param in opt.target.params():
# Clear all grads
param.cleargrad()
target = self.target
target.to_device(backend_config.device)
# TODO(niboshi): Do not use SGD in GradientMethod test
opt = optimizers.SGD(lr=1)
opt.setup(target)
opt.add_hook(CleargradHook(self))
opt.add_hook(DummyHook(self))
opt.update()
class DummyOptimizer(chainer.GradientMethod):
def __init__(self, test):
super(DummyOptimizer, self).__init__()
self.test = test
def create_update_rule(self):
return mock.MagicMock()
class DummyHook(object):
name = 'Dummy'
timing = 'pre'
def __init__(self, test):
self.test = test
def __call__(self, opt):
for param in opt.target.params():
# Confirm all grads are not None
self.test.assertIsNotNone(param.grad)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestGradientMethodClearGrads(unittest.TestCase):
def setUp(self):
self.optimizer = DummyOptimizer(self)
self.target = SimpleLink(
np.arange(3).astype(np.float32),
np.arange(3).astype(np.float32))
self.optimizer.setup(self.target)
self.optimizer.add_hook(DummyHook(self))
def test_update(self, backend_config):
target = self.target
optimizer = self.optimizer
target.to_device(backend_config.device)
target.cleargrads()
optimizer.update()
class TestDeprecatedOptimizerHooksEmitsWarning(unittest.TestCase):
def setUp(self):
self.context = warnings.catch_warnings(record=True)
self.warnings = self.context.__enter__()
warnings.filterwarnings(action='always', category=DeprecationWarning)
def tearDown(self):
self.context.__exit__()
def test_gradient_clipping(self):
chainer.optimizer.GradientClipping(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_gradient_hard_clipping(self):
chainer.optimizer.GradientHardClipping(1., 2.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_gradient_noise(self):
chainer.optimizer.GradientNoise(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_lasso(self):
chainer.optimizer.Lasso(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
def test_weight_decay(self):
chainer.optimizer.WeightDecay(1.)
self.assertEqual(len(self.warnings), 1)
self.assertIs(self.warnings[-1].category, DeprecationWarning)
@testing.parameterize(*testing.product({
    # None: the dtype is not given by the initializer.
    # Otherwise: the dtype is given by the initializer.
'dtype': [None, np.float16, np.float32, np.float64]
}))
class TestUpdateRuleUseFp32Update(unittest.TestCase):
def test_uninitialized_parameter(self):
dtype = self.dtype
def initializer(array):
assert False # never called
# Set initializer.dtype to specify the parameter's dtype
if dtype is not None:
initializer.dtype = dtype
# Create an uninitialized parameter
param = chainer.Parameter(initializer)
assert param.array is None
if dtype is not None:
assert param.dtype == dtype
# Create an update rule with custom update_core
record = []
update_rule = chainer.UpdateRule()
def update_core(param):
            # param.dtype may not be retrievable because the parameter can be
            # uninitialized and its dtype not given (i.e. self.dtype is None).
try:
param_dtype = param.dtype
except RuntimeError:
param_dtype = None
record.append({
'param': param,
'dtype': param_dtype,
})
update_rule.update_core = update_core
# Enable fp32 update
update_rule.use_fp32_update()
# Call update_rule.update
update_rule.update(param)
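        # With use_fp32_update(), a float16 parameter is presented to
        # update_core as a float32 copy; other dtypes are passed through
        # unchanged.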
if dtype == np.float16:
assert record[0]['param'] is not param
assert record[0]['dtype'] == np.float32
else:
assert record[0]['param'] is param
assert record[0]['dtype'] == dtype
# The original parameter is kept uninitialized and its dtype is
# unchanged.
assert param.array is None
if dtype is not None:
assert param.dtype == dtype
else:
with pytest.raises(RuntimeError):
param.dtype
testing.run_module(__name__, __file__)
| 27,955
| 32.400239
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_chainer_objects.py
|
import importlib
import inspect
import pkgutil
import types
import unittest
import six
import chainer
from chainer import testing
module_prefix = 'chainer.'
def walk_modules():
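    # Import every submodule under the chainer package, skipping
    # protobuf-generated modules and anything that fails to import
    # (e.g. due to missing optional dependencies).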
root = chainer.__path__
for loader, modname, ispkg in pkgutil.walk_packages(root, module_prefix):
# Skip modules generated by protobuf.
if '_pb2' in modname:
continue
try:
mod = importlib.import_module(modname)
except ImportError:
continue
yield mod
def get_classes(module):
# Enumerate classes from a module
for name, o in module.__dict__.items():
if (inspect.isclass(o)
and o.__module__.startswith(module_prefix)):
yield o
def get_functions(module):
# Enumerate functions from a module
# Normal functions
for k, o in module.__dict__.items():
if (isinstance(o, types.FunctionType)
and o.__module__.startswith(module_prefix)):
yield o
# Methods defined in a class
for cls in get_classes(module):
if cls.__module__.startswith(module_prefix):
for k, o in cls.__dict__.items():
if inspect.isfunction(o):
yield o
def get_default_arguments(func):
# Retrieves the defaults arguments (names and values) of a function.
if six.PY2:
# Python 2
spec = inspect.getargspec(func)
if spec.defaults is not None:
n = len(spec.defaults)
for name, default_value in zip(spec.args[-n:], spec.defaults):
yield name, default_value
else:
# Python 3
signature = inspect.signature(func)
for name, param in signature.parameters.items():
if param.default is not inspect.Parameter.empty:
yield name, param.default
class TestFunctions(unittest.TestCase):
def test_no_mutable_default_args(self):
type_blacklist = (list, dict)
badlist = []
# Collect mutable default arguments
for mod in walk_modules():
for func in get_functions(mod):
for arg_name, value in get_default_arguments(func):
if isinstance(value, type_blacklist):
badlist.append((func, arg_name, type(value)))
if len(badlist) > 0:
# Report the error
s = six.StringIO()
s.write(
'Some functions have mutable values as default arguments:\n\n')
for func, arg_name, value_type in badlist:
s.write('{}.{}: arg=\'{}\' type={}\n'.format(
func.__module__, func.__name__, arg_name, value_type))
assert False, s.getvalue()
testing.run_module(__name__, __file__)
| 2,776
| 27.628866
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/conftest.py
|
import pytest
from chainer.backends import cuda
import chainerx
if not chainerx.is_available():
# Skip all ChainerX tests if ChainerX is unavailable.
    # TODO(kmaehashi) This is a tentative fix. This file should be removed
# once chainer-test supports ChainerX.
pytest.mark.chainerx = pytest.mark.skip
def pytest_runtest_teardown(item, nextitem):
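    # Every test must leave CUDA device 0 as the current device.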
if cuda.available:
assert cuda.cupy.cuda.runtime.getDevice() == 0
# testing.run_module(__name__, __file__)
| 488
| 23.45
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_runtime_info.py
|
import unittest
import six
import chainer
from chainer import _runtime_info
from chainer import testing
class TestRuntimeInfo(unittest.TestCase):
def test_get_runtime_info(self):
info = _runtime_info._get_runtime_info()
assert chainer.__version__ in str(info)
def test_print_runtime_info(self):
out = six.StringIO()
_runtime_info.print_runtime_info(out)
assert out.getvalue() == str(_runtime_info._get_runtime_info())
testing.run_module(__name__, __file__)
| 512
| 22.318182
| 71
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_sequential.py
|
import functools
import os
import tempfile
import unittest
import mock
import numpy
import pytest
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer import variable
class TestSequential(unittest.TestCase):
def setUp(self):
self.l1 = links.Linear(None, 3)
self.l2 = links.Linear(3, 2)
self.l3 = links.Linear(2, 3)
# s1: l1 -> l2
self.s1 = chainer.Sequential(self.l1)
self.s1.append(self.l2)
# s2: s1 (l1 -> l2) -> l3
self.s2 = chainer.Sequential(self.s1)
self.s2.append(self.l3)
def test_init(self):
self.assertIs(self.s1[0], self.l1)
self.assertEqual(self.l1.name, '0')
self.assertIs(self.s2[0], self.s1)
self.assertEqual(self.s1.name, '0')
with self.assertRaises(ValueError):
chainer.Sequential(0)
def test_append(self):
self.assertIs(self.s2[1], self.l3)
self.assertEqual(self.l2.name, '1')
def test_iter(self):
links = list(self.s2)
self.assertEqual(2, len(links))
self.assertIs(links[0], self.s1)
self.assertIs(links[1], self.l3)
def test_len(self):
self.assertIs(len(self.s1), 2)
self.assertIs(len(self.s2), 2)
def test_copy(self):
s2 = self.s2.copy()
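        # copy() defaults to 'share' mode: new Link and Parameter objects are
        # created, the underlying data arrays are shared, and gradients are
        # dropped.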
self.assertIs(s2.name, None)
self.assertIsInstance(s2._children, list)
self.assertIsNot(s2[0], self.s1)
self.assertEqual(s2[0].name, '0')
self.assertIsInstance(s2[0]._children, list)
self.assertIsNot(s2[0][0], self.l1)
self.assertEqual(s2[0][0].name, '0')
self.assertIsNot(s2[0][0].b, self.l1.b)
self.assertIs(s2[0][0].b.data, self.l1.b.data)
self.assertIs(s2[0][0].b.grad, None)
self.assertIsNot(s2[0][1], self.l2)
self.assertEqual(s2[0][1].name, '1')
self.assertIsNot(s2[0][1].W, self.l2.W)
self.assertIs(s2[0][1].W.data, self.l2.W.data)
self.assertIs(s2[0][1].W.grad, None)
self.assertIsNot(s2[1], self.l3)
self.assertEqual(s2[1].name, '1')
self.assertIsNot(s2[1].W, self.l3.W)
self.assertIs(s2[1].W.data, self.l3.W.data)
self.assertIs(s2[1].W.grad, None)
def test_copy_with_nonparametric_function(self):
self.s1.insert(1, functions.relu)
# l1 -> relu -> l2
# The default copy mode is 'share'
s1 = self.s1.copy()
self.assertIsNot(s1[0], self.s1[0]) # l1
self.assertIs(s1[1], self.s1[1]) # relu
self.assertIsNot(s1[2], self.s1[2]) # l2
# parameters of l1
self.assertIsNot(s1[0].W, self.s1[0].W)
self.assertIsNot(s1[0].b, self.s1[0].b)
# W of the first link has not been initialized
self.assertIs(s1[0].W.array, None)
self.assertIs(s1[0].W.grad, None)
# The bias is initialized
self.assertIs(s1[0].b.array, self.s1[0].b.array)
self.assertIs(s1[0].b.grad, None)
# The copied Function should be identical
self.assertIs(s1[1], self.s1[1])
# parameters of l2
self.assertIsNot(s1[2].W, self.s1[2].W)
self.assertIsNot(s1[2].b, self.s1[2].b)
self.assertIs(s1[2].W.array, self.s1[2].W.array)
self.assertIs(s1[2].W.grad, None)
self.assertIs(s1[2].b.array, self.s1[2].b.array)
self.assertIs(s1[2].b.grad, None)
@attr.gpu
def test_copy_and_send_to_gpu(self):
s2 = self.s2.copy()
with testing.assert_warns(DeprecationWarning):
self.s2.to_gpu()
self.assertIsInstance(self.s2[0][0].b.data, cuda.cupy.ndarray)
self.assertIsInstance(self.s2[0][1].W.data, cuda.cupy.ndarray)
self.assertIsInstance(s2[0][0].b.data, numpy.ndarray)
self.assertIsInstance(s2[0][1].W.data, numpy.ndarray)
@attr.gpu
def test_copy_and_send_to_gpu_2(self):
s2 = self.s2.copy()
with testing.assert_warns(DeprecationWarning):
s2.to_gpu()
self.assertIsInstance(self.s2[0][0].b.data, numpy.ndarray)
self.assertIsInstance(self.s2[0][1].W.data, numpy.ndarray)
self.assertIsInstance(s2[0][0].b.data, cuda.cupy.ndarray)
self.assertIsInstance(s2[0][1].W.data, cuda.cupy.ndarray)
@attr.multi_gpu(2)
def test_copy_and_send_to_gpu_multi(self):
s2 = self.s2.copy()
with testing.assert_warns(DeprecationWarning):
self.s2.to_gpu(0)
with testing.assert_warns(DeprecationWarning):
s2.to_gpu(1)
self.assertEqual(self.s2[0][0].b.data.device.id, 0)
self.assertEqual(self.s2[0][1].W.data.device.id, 0)
self.assertEqual(s2[0][0].b.data.device.id, 1)
self.assertEqual(s2[0][1].W.data.device.id, 1)
def test_to_cpu_on_cpu(self):
x1 = self.l1.b.data
gx1 = self.l1.b.grad
x2 = self.l2.W.data
gx2 = self.l2.W.grad
x3 = self.l3.W.data
gx3 = self.l3.W.grad
with testing.assert_warns(DeprecationWarning):
self.s2.to_cpu()
self.assertIs(self.l1.b.data, x1)
self.assertIs(self.l1.b.grad, gx1)
self.assertIs(self.l2.W.data, x2)
self.assertIs(self.l2.W.grad, gx2)
self.assertIs(self.l3.W.data, x3)
self.assertIs(self.l3.W.grad, gx3)
@attr.gpu
def test_to_cpu(self):
with testing.assert_warns(DeprecationWarning):
self.s2.to_gpu()
with testing.assert_warns(DeprecationWarning):
self.s2.to_cpu()
self.assertIs(self.s2.xp, numpy)
self.assertIs(self.s1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.b.data, numpy.ndarray)
self.assertIsInstance(self.l1.b.grad, numpy.ndarray)
self.assertIsInstance(self.l2.W.data, numpy.ndarray)
self.assertIsInstance(self.l2.W.grad, numpy.ndarray)
self.assertIsInstance(self.l3.W.data, numpy.ndarray)
self.assertIsInstance(self.l3.W.grad, numpy.ndarray)
@attr.gpu
def test_to_gpu(self):
cupy = cuda.cupy
with testing.assert_warns(DeprecationWarning):
self.s2.to_gpu()
self.assertIs(self.s2.xp, cupy)
self.assertIs(self.s1.xp, cupy)
self.assertIs(self.l1.xp, cupy)
self.assertIs(self.l2.xp, cupy)
self.assertIs(self.l3.xp, cupy)
self.assertIsInstance(self.l1.b.data, cupy.ndarray)
self.assertIsInstance(self.l1.b.grad, cupy.ndarray)
self.assertIsInstance(self.l2.W.data, cupy.ndarray)
self.assertIsInstance(self.l2.W.grad, cupy.ndarray)
self.assertIsInstance(self.l3.W.data, cupy.ndarray)
self.assertIsInstance(self.l3.W.grad, cupy.ndarray)
def test_params(self):
params = list(self.s2.params())
self.assertEqual({id(p) for p in params},
{id(self.l1.W), id(self.l1.b),
id(self.l2.W), id(self.l2.b),
id(self.l3.W), id(self.l3.b)})
def test_params_skip_uninit(self):
params = list(self.s2.params(include_uninit=False))
self.assertEqual({id(p) for p in params},
{id(self.l1.b), id(self.l2.W), id(self.l2.b),
id(self.l3.W), id(self.l3.b)})
def test_namedparams(self):
namedparams = list(self.s2.namedparams())
self.assertEqual({(name, id(p)) for name, p in namedparams},
{('/0/0/W', id(self.l1.W)),
('/0/0/b', id(self.l1.b)),
('/0/1/W', id(self.l2.W)),
('/0/1/b', id(self.l2.b)),
('/1/W', id(self.l3.W)),
('/1/b', id(self.l3.b))})
def test_namedparams_skip_uninit(self):
namedparams = list(self.s2.namedparams(include_uninit=False))
self.assertEqual({(name, id(p)) for name, p in namedparams},
{('/0/0/b', id(self.l1.b)),
('/0/1/W', id(self.l2.W)),
('/0/1/b', id(self.l2.b)),
('/1/W', id(self.l3.W)),
('/1/b', id(self.l3.b))})
def test_links(self):
links = list(self.s2.links())
self.assertEqual({id(l) for l in links},
{id(l) for l in [self.l1, self.l2, self.l3,
self.s1, self.s2]})
def test_links_skipself(self):
links = list(self.s2.links(skipself=True))
self.assertEqual({id(l) for l in links},
{id(l) for l in [self.l1, self.l2, self.l3, self.s1]})
def test_namedlinks(self):
namedlinks = list(self.s2.namedlinks())
self.assertEqual({(name, id(l)) for name, l in namedlinks},
{('/', id(self.s2)),
('/0', id(self.s1)),
('/0/0', id(self.l1)),
('/0/1', id(self.l2)),
('/1', id(self.l3))})
def test_namedlinks_skipself(self):
namedlinks = list(self.s2.namedlinks(skipself=True))
self.assertEqual({(name, id(l)) for name, l in namedlinks},
{('/0', id(self.s1)),
('/0/0', id(self.l1)),
('/0/1', id(self.l2)),
('/1', id(self.l3))})
def test_children(self):
self.assertEqual(tuple(id(c) for c in self.s2.children()),
(id(self.s1), id(self.l3)))
self.assertEqual(tuple(id(c) for c in self.s1.children()),
(id(self.l1), id(self.l2)))
def test_copyparams(self):
l1 = links.Linear(None, 3)
l2 = links.Linear(3, 2)
l3 = links.Linear(2, 3)
s1 = chainer.Sequential(l1, l2)
s2 = chainer.Sequential(s1, l3)
l1.b.data.fill(0)
l2.W.data.fill(1)
l2.b.data.fill(2)
l3.W.data.fill(3)
l3.b.data.fill(4)
self.s2.copyparams(s2)
numpy.testing.assert_array_equal(self.l1.b.data, l1.b.data)
numpy.testing.assert_array_equal(self.l2.W.data, l2.W.data)
numpy.testing.assert_array_equal(self.l2.b.data, l2.b.data)
numpy.testing.assert_array_equal(self.l3.W.data, l3.W.data)
numpy.testing.assert_array_equal(self.l3.b.data, l3.b.data)
def test_zerograds(self):
with testing.assert_warns(DeprecationWarning):
self.s2.zerograds()
numpy.testing.assert_array_equal(self.l1.b.grad, numpy.zeros((3,)))
numpy.testing.assert_array_equal(
self.l2.W.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(
self.l3.W.grad, numpy.zeros((3, 2)))
self.l1.W.initialize((3, 2))
numpy.testing.assert_array_equal(
self.l1.W.grad, numpy.zeros((3, 2)))
def test_cleargrads(self):
self.s2.cleargrads()
self.assertIsNone(self.l1.b.grad)
self.assertIsNone(self.l2.W.grad)
self.assertIsNone(self.l2.b.grad)
self.assertIsNone(self.l3.W.grad)
self.assertIsNone(self.l3.b.grad)
self.l1.W.initialize((2, 3))
self.assertIsNone(self.l1.W.grad)
def test_addgrads(self):
l1 = links.Linear(2, 3)
l2 = links.Linear(3, 2)
l3 = links.Linear(2, 3)
s1 = chainer.Sequential(l1, l2)
s2 = chainer.Sequential(s1, l3)
l1.b.grad.fill(1)
l2.W.grad.fill(2)
l2.b.grad.fill(3)
l3.W.grad.fill(4)
l3.b.grad.fill(5)
l1.W.grad.fill(6)
self.l1.b.grad.fill(-1)
self.l2.W.grad.fill(-2)
self.l2.b.grad.fill(-3)
self.l3.W.grad.fill(-4)
self.l3.b.grad.fill(-5)
self.l1.W.cleargrad()
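        # The grads in self.s2 are the negatives of those in s2 and should
        # cancel to zero after addgrads(); self.l1.W's grad was cleared, so
        # it should simply equal l1.W's grad afterwards.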
self.s2.addgrads(s2)
numpy.testing.assert_array_equal(self.l1.b.grad, numpy.zeros((3,)))
numpy.testing.assert_array_equal(self.l1.W.grad, l1.W.grad)
numpy.testing.assert_array_equal(self.l2.W.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l2.b.grad, numpy.zeros((2,)))
numpy.testing.assert_array_equal(self.l3.W.grad, numpy.zeros((3, 2)))
numpy.testing.assert_array_equal(self.l3.b.grad, numpy.zeros((3,)))
def test_serialize(self):
l1 = links.Linear(None, 1)
l2 = links.Linear(None, 3)
with l2.init_scope():
l2.x = variable.Parameter(0, 2)
s1 = chainer.Sequential(l1, l2)
mocks = {'0': mock.MagicMock(), '1': mock.MagicMock()}
serializer = mock.MagicMock()
serializer.__getitem__.side_effect = lambda k: mocks[k]
serializer.return_value = None
mocks['0'].return_value = None
mocks['1'].return_value = None
s1.serialize(serializer)
self.assertEqual(serializer.call_count, 0)
self.assertEqual(serializer.__getitem__.call_count, 2)
serializer.__getitem__.assert_any_call('0')
serializer.__getitem__.assert_any_call('1')
mocks['0'].assert_any_call('W', None)
mocks['0'].assert_any_call('b', l1.b.data)
mocks['1'].assert_any_call('W', None)
mocks['1'].assert_any_call('b', l2.b.data)
mocks['1'].assert_any_call('x', l2.x.data)
def test_getitem(self):
self.assertIs(self.s1[0], self.l1)
def test_delitem(self):
del self.s1[0]
self.assertIsNot(self.s1[0], self.l1)
self.assertIs(self.s1[0], self.l2)
def test_reversed(self):
layers = list(reversed(self.s2))
self.assertIs(layers[0], self.l3)
self.assertIs(layers[1], self.s1)
def test_contains(self):
self.assertTrue(self.l1 in self.s1)
self.assertTrue(self.l2 in self.s1)
self.assertTrue(self.s1 in self.s2)
self.assertTrue(self.l3 in self.s2)
self.assertFalse(self.l3 in self.s1)
self.assertFalse(self.l2 in self.s2)
def test_add(self):
l1 = links.Linear(3, 2)
l2 = functions.relu
other = chainer.Sequential(l1, l2)
added = self.s2 + other
self.assertEqual(len(added), 4)
self.assertIs(added[0], self.s1)
self.assertIs(added[1], self.l3)
self.assertIs(added[2], l1)
self.assertIs(added[3], l2)
with self.assertRaises(ValueError):
self.s2 + 0
def test_iadd(self):
l4 = links.Linear(3, 1)
self.s2 += chainer.Sequential(l4)
self.assertIs(self.s2[0], self.s1)
self.assertIs(self.s2[1], self.l3)
self.assertIs(self.s2[2], l4)
with self.assertRaises(ValueError):
self.s2 += 0
def test_call(self):
l1 = mock.MagicMock()
l2 = mock.MagicMock()
l3 = mock.MagicMock()
model = chainer.Sequential(l1, l2, l3)
x = numpy.arange(2).reshape(1, 2).astype('f')
y = model(x)
l1.assert_called_once()
l2.assert_called_once()
l3.assert_called_once()
y = self.s1(x)
self.assertIs(y.creator.inputs[1].data, self.l2.W.data)
def test_call_with_multiple_inputs(self):
model = chainer.Sequential(
lambda x, y: (x * 2, y * 3, x + y),
lambda x, y, z: x + y + z
)
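        # (2 * 2) + (3 * 3) + (2 + 3) == 4 + 9 + 5 == 18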
y = model(2, 3)
self.assertEqual(y, 18)
def test_extend(self):
l1 = links.Linear(3, 2)
l2 = links.Linear(2, 3)
s3 = chainer.Sequential(l1, l2)
self.s2.extend(s3)
self.assertEqual(len(self.s2), 4)
self.assertIs(self.s2[2], s3[0])
self.assertIs(self.s2[3], s3[1])
def test_remove(self):
self.s2.remove(self.s1)
self.assertEqual(len(self.s2), 1)
self.assertIs(self.s2[0], self.l3)
def test_remove_by_layer_type(self):
self.s2.insert(2, functions.relu)
self.s2.remove_by_layer_type('Linear')
self.assertEqual(len(self.s2), 2)
self.assertIs(self.s2[0], self.s1)
self.assertIs(self.s2[1], functions.relu)
def test_pop(self):
l3 = self.s2.pop(1)
self.assertIs(l3, self.l3)
self.assertEqual(len(self.s2), 1)
def test_clear(self):
self.s2.clear()
self.assertEqual(len(self.s2), 0)
def test_index(self):
self.assertEqual(self.s2.index(self.s1), 0)
self.assertEqual(self.s2.index(self.l3), 1)
def test_count(self):
self.s2.insert(1, functions.relu)
self.s2.insert(3, functions.relu)
self.assertEqual(self.s2.count(functions.relu), 2)
self.assertEqual(self.s2.count(self.s1), 1)
self.assertEqual(self.s2.count(self.l3), 1)
self.s2.append(self.l3)
self.assertEqual(self.s2.count(self.l3), 2)
def test_count_by_layer_type(self):
self.assertEqual(self.s2.count_by_layer_type('Linear'), 1)
self.s2.insert(1, functions.relu)
self.s2.insert(3, functions.relu)
self.assertEqual(self.s2.count_by_layer_type('relu'), 2)
def test_pickle_without_lambda(self):
fd, path = tempfile.mkstemp()
six.moves.cPickle.dump(self.s2, open(path, 'wb'))
s2 = six.moves.cPickle.load(open(path, 'rb'))
self.assertEqual(len(s2), len(self.s2))
numpy.testing.assert_array_equal(s2[0][0].b.data, self.s2[0][0].b.data)
numpy.testing.assert_array_equal(s2[0][1].W.data, self.s2[0][1].W.data)
numpy.testing.assert_array_equal(s2[0][1].b.data, self.s2[0][1].b.data)
numpy.testing.assert_array_equal(s2[1].W.data, self.s2[1].W.data)
numpy.testing.assert_array_equal(s2[1].b.data, self.s2[1].b.data)
for l1, l2 in zip(s2, self.s2):
self.assertIsNot(l1, l2)
os.close(fd)
os.remove(path)
def test_pickle_with_lambda(self):
self.s2.append(lambda x: x)
with self.assertRaises(ValueError):
with tempfile.TemporaryFile() as fp:
six.moves.cPickle.dump(self.s2, fp)
def test_str(self):
self.assertEqual(str(chainer.Sequential()), 'Sequential()')
expected = '''\
(0): Sequential(
(0): Linear(in_size=None, out_size=3, nobias=False),
(1): Linear(in_size=3, out_size=2, nobias=False),
),
(1): Linear(in_size=2, out_size=3, nobias=False),
(2): lambda x: functions.leaky_relu(x, slope=0.2),
'''
layers = [
self.s1,
self.l3,
lambda x: functions.leaky_relu(x, slope=0.2),
]
if six.PY3:
            # In Python 2, this fails because the id of the function differs.
layer = functools.partial(functions.leaky_relu, slope=0.2)
layers.append(layer)
expected += ' (3): %s,\n' % layer
expected = 'Sequential(\n%s)' % expected
s = chainer.Sequential(*layers)
self.assertEqual(str(s), expected)
def test_repeat_with_init(self):
# s2 ((l1 -> l2) -> l3) -> s2 ((l1 -> l2) -> l3)
ret = self.s2.repeat(2)
self.assertIsNot(ret[0], self.s2)
self.assertIs(type(ret[0]), type(self.s2))
self.assertIsNot(ret[1], self.s2)
self.assertIs(type(ret[1]), type(self.s2))
# b is filled with 0, so they should have the same values
numpy.testing.assert_array_equal(
ret[0][0][0].b.array, ret[1][0][0].b.array)
# W is initialized randomly, so they should be different
self.assertFalse(
numpy.array_equal(ret[0][1].W.array, self.l3.W.array))
# And the object should also be different
self.assertIsNot(ret[0][1].W.array, self.l3.W.array)
# Repeated elements should be different objects
self.assertIsNot(ret[0], ret[1])
# Also for the arrays
self.assertIsNot(ret[0][1].W.array, ret[1][1].W.array)
# And values should be different
self.assertFalse(
numpy.array_equal(ret[0][1].W.array, ret[1][1].W.array))
self.assertEqual(len(ret), 2)
ret = self.s2.repeat(0, mode='init')
self.assertEqual(len(ret), 0)
def test_repeat_with_copy(self):
# s2 ((l1 -> l2) -> l3) -> s2 ((l1 -> l2) -> l3)
ret = self.s2.repeat(2, mode='copy')
self.assertIsNot(ret[0], self.s2)
self.assertIs(type(ret[0]), type(self.s2))
self.assertIsNot(ret[1], self.s2)
self.assertIs(type(ret[1]), type(self.s2))
self.assertIsNot(ret[0], ret[1])
# b is filled with 0, so they should have the same values
numpy.testing.assert_array_equal(
ret[0][0][0].b.array, ret[1][0][0].b.array)
        # W is shallowly copied, so the values should be the same
numpy.testing.assert_array_equal(ret[0][1].W.array, self.l3.W.array)
# But the object should be different
self.assertIsNot(ret[0][1].W.array, self.l3.W.array)
# Repeated elements should be different objects
self.assertIsNot(ret[0][0], ret[1][0])
# Also for the arrays
self.assertIsNot(ret[0][1].W.array, ret[1][1].W.array)
        # But the values should be the same
numpy.testing.assert_array_equal(ret[0][1].W.array, ret[1][1].W.array)
self.assertEqual(len(ret), 2)
ret = self.s2.repeat(0, mode='copy')
self.assertEqual(len(ret), 0)
def test_repeat_with_share(self):
# s2 ((l1 -> l2) -> l3) -> s2 ((l1 -> l2) -> l3)
ret = self.s2.repeat(2, mode='share')
self.assertIsNot(ret[0], self.s2)
self.assertIs(type(ret[0]), type(self.s2))
self.assertIsNot(ret[1], self.s2)
self.assertIs(type(ret[1]), type(self.s2))
# b is filled with 0, so they should have the same values
numpy.testing.assert_array_equal(
ret[0][0][0].b.data, ret[1][0][0].b.data)
        # W is shallowly copied, so the values should be the same
numpy.testing.assert_array_equal(ret[0][1].W.array, self.l3.W.array)
numpy.testing.assert_array_equal(ret[1][1].W.array, self.l3.W.array)
# And the object should also be same
self.assertIs(ret[0][1].W.array, self.l3.W.array)
self.assertIs(ret[1][1].W.array, self.l3.W.array)
# Repeated element itself should be different
self.assertIsNot(ret[0], ret[1])
self.assertEqual(len(ret), 2)
ret = self.s2.repeat(0, mode='share')
self.assertEqual(len(ret), 0)
def test_flatten(self):
flattened_s2 = self.s2.flatten()
self.assertIs(flattened_s2[0], self.l1)
self.assertIs(flattened_s2[1], self.l2)
self.assertIs(flattened_s2[2], self.l3)
class TestEmptySequential(unittest.TestCase):
def test_empty_sequential(self):
seq = chainer.Sequential()
x = numpy.ones((2, 3), numpy.float32)
with pytest.raises(RuntimeError):
seq(x)
@testing.parameterize_pytest(
'expect_error,orig,pos,is_link', [
# Insertion into an empty sequential
(False, (), 0, False),
(False, (), 0, True),
# Insertion into a sequential with 1 element
(False, (False,), 0, False),
(False, (True,), 0, True),
(False, (False,), 1, False),
(False, (True,), 1, True),
(False, (False,), -1, False),
(False, (True,), -1, True),
# Insertion into a sequential with multiple elements
(False, (False, False), -1, False),
(False, (True, True), -1, True),
(False, (False, False), 1, False),
(False, (True, True), 1, True),
# Index error expected
(True, (), 1, True),
(True, (), -1, False),
(True, (True,), 2, True),
(True, (False,), -2, False),
]
)
class TestSequentialInsert(unittest.TestCase):
def test_insert(self):
funcs = [
functions.sin,
functions.cos,
functions.tan,
]
# Prepare the original sequential before insertion.
orig = []
for orig_is_link in self.orig:
if orig_is_link:
orig.append(links.Linear((3, 3)))
else:
orig.append(funcs.pop(0))
# The subject of insertion
if self.is_link:
subj = links.Linear((3, 3))
else:
subj = funcs.pop(0)
# Instantiate the sequential
seq = chainer.Sequential(*orig)
if self.expect_error:
with pytest.raises(IndexError):
seq.insert(self.pos, subj)
else:
seq.insert(self.pos, subj)
            # Insert into `orig` in the same way for the following comparison
orig.insert(self.pos, subj)
assert len(seq) == len(self.orig) + 1
for i in range(len(self.orig) + 1):
assert seq[i] is orig[i]
testing.run_module(__name__, __file__)
| 24,937
| 35.945185
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_link.py
|
import copy
import unittest
import warnings
import mock
import numpy
import pytest
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import initializers
from chainer import testing
from chainer.testing import attr
import chainerx
def _assert_variable_array_equal(var, expected_array):
assert var.shape == expected_array.shape
assert var.dtype == expected_array.dtype
_assert_arrays_equal(var.data, expected_array)
def _assert_arrays_equal(array, expected_array):
array = backend.CpuDevice().send(array)
assert array.shape == expected_array.shape
assert array.dtype == expected_array.dtype
assert (array == expected_array).all()
def _shaped_random(shape, dtype):
if isinstance(shape, int):
shape = (shape,)
return numpy.asarray(numpy.random.randn(*shape)).astype(dtype)
_inject_backend_tests = testing.backend.inject_backend_tests(
None,
[
# NumPy
{},
# iDeep
{'use_ideep': 'always'},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
_inject_backend_tests_no_intel64 = testing.backend.inject_backend_tests(
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class LinkTestBase(object):
def setUp(self):
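        # Build a link with two initialized parameters (x, y), two
        # uninitialized parameters (u, v) and one persistent array (p);
        # x's update rule is disabled while u's is enabled.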
x_shape_0 = 2
x_shape_1 = numpy.int64(3)
self.link = chainer.Link(x=((x_shape_0, x_shape_1), 'd'),
u=(None, 'd'))
with self.link.init_scope():
self.link.y = chainer.Parameter(shape=(2,))
self.link.v = chainer.Parameter()
self.p = numpy.array([1, 2, 3], dtype='f')
self.link.add_persistent('p', self.p)
self.link.name = 'a'
self.link.x.update_rule = chainer.UpdateRule()
self.link.x.update_rule.enabled = False
self.link.u.update_rule = chainer.UpdateRule()
if cuda.available:
self.current_device_id = cuda.cupy.cuda.get_device_id()
def tearDown(self):
if cuda.available \
and cuda.cupy.cuda.get_device_id() != self.current_device_id:
cuda.Device(self.current_device_id).use()
def check_param_init(self, name, shape, dtype, data_value=numpy.nan):
self.assertTrue(hasattr(self.link, name))
var = getattr(self.link, name)
self.assertEqual(var.name, name)
self.assertIsInstance(var, chainer.Parameter)
self.assertEqual(var.data.shape, shape)
self.assertEqual(var.data.dtype, dtype)
numpy.testing.assert_array_equal(
backend.CpuDevice().send(var.data), data_value)
self.assertEqual(var.grad.shape, shape)
self.assertEqual(var.grad.dtype, dtype)
numpy.testing.assert_array_equal(
backend.CpuDevice().send(var.grad), numpy.nan)
def check_param_uninit(self, name, initializer=None):
self.assertTrue(hasattr(self.link, name))
var = getattr(self.link, name)
self.assertIsInstance(var, chainer.Parameter)
self.assertEqual(var.name, name)
self.assertIsNone(var.data)
if initializer is not None:
self.assertIs(var.initializer, initializer)
class TestLink(LinkTestBase, unittest.TestCase):
def test_init(self):
self.check_param_init('x', (2, 3), 'd')
self.check_param_init('y', (2,), 'f')
self.check_param_uninit('u')
self.link.u.initialize((2, 3))
self.check_param_init('u', (2, 3), 'd')
self.check_param_uninit('v')
self.link.v.initialize((2, 3))
self.check_param_init('v', (2, 3), 'f')
def test_str(self):
# empty Link
self.assertEqual(str(chainer.Link()), 'Link()')
class MyLink(chainer.Link):
pass
# Link without overriding printable_specs
self.assertEqual(str(MyLink()), 'MyLink()')
class LinearForTest(chainer.Link):
def __init__(self, in_size, out_size, nobias=False):
self.in_size = in_size
self.out_size = out_size
self.nobias = nobias
@property
def printable_specs(self):
specs = [
('in_size', self.in_size),
('out_size', self.out_size),
('nobias', self.nobias)
]
for spec in specs:
yield spec
def __call__(self):
pass
# Link with overriding printable_specs
self.assertEqual(
str(LinearForTest(10, 1)),
'LinearForTest(in_size=10, out_size=1, nobias=False)',
)
def test_assign_param_outside_of_init_scope(self):
p = chainer.Parameter()
self.link.p = p
self.assertTrue(all(p is not param for param in self.link.params()))
def test_assign_var_in_init_scope(self):
p = chainer.Variable()
with self.link.init_scope():
self.link.p = p
self.assertTrue(all(p is not param for param in self.link.params()))
def test_call_injected_with_mixin(self):
call = mock.MagicMock()
call.return_value = 3
class CallMixin(object):
__call__ = call
class InjectedLink(chainer.Link, CallMixin):
pass
link = InjectedLink()
ret = link(1, a=2)
call.assert_called_once_with(1, a=2)
assert ret == call.return_value
def test_add_param(self):
self.link.add_param('z', (2, 3))
self.check_param_init('z', (2, 3), 'f')
self.link.add_param('w', (2, 3), dtype='d')
self.check_param_init('w', (2, 3), 'd')
self.link.add_param('r')
self.check_param_uninit('r')
self.link.r.initialize((2, 3))
self.check_param_init('r', (2, 3), 'f')
self.link.add_param('s', dtype='d')
self.check_param_uninit('s')
self.link.s.initialize((2, 3))
self.check_param_init('s', (2, 3), 'd')
initializer = initializers.Zero('d')
self.link.add_param('t', initializer=initializer)
self.check_param_uninit('t', initializer)
self.link.t.initialize((2, 3))
self.check_param_init('t', (2, 3), 'd', 0)
def test_add_param_direct_initialization(self):
z = numpy.random.rand(2, 3).astype('f')
self.link.add_param('z', initializer=z)
self.assertIsInstance(self.link.z.data, numpy.ndarray)
numpy.testing.assert_array_equal(self.link.z.data, z)
def test_add_param_duplicated_with_persistent(self):
self.link.add_persistent('z', 'abc')
with self.assertRaises(AttributeError):
self.link.add_param('z', (2, 3))
def test_add_persistent(self):
self.assertTrue(hasattr(self.link, 'p'))
self.assertIs(self.link.p, self.p)
self.link.add_persistent('q', 'abc')
self.assertTrue(hasattr(self.link, 'q'))
self.assertEqual(self.link.q, 'abc')
def test_delete(self):
del self.link.x
self.assertFalse(hasattr(self.link, 'x'))
self.assertNotIn('x', self.link._params)
self.assertNotIn('x', self.link._persistent)
del self.link.p
self.assertFalse(hasattr(self.link, 'p'))
self.assertNotIn('p', self.link._params)
self.assertNotIn('p', self.link._persistent)
def test_copy_with_share_mode(self):
link = self.link.copy(mode='share')
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIs(link.x.array, self.link.x.array)
self.assertIsNot(link.y, self.link.y)
self.assertIs(link.y.array, self.link.y.array)
self.assertIsNone(link.u.array)
self.assertIs(link.p, self.link.p)
self.assertIs(link.name, None)
def test_copy_with_copy_mode(self):
link = self.link.copy(mode='copy')
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIsNot(link.x.array, self.link.x.array)
self.assertIsNot(link.y, self.link.y)
self.assertIsNot(link.y.array, self.link.y.array)
self.assertIsNone(link.u.array)
self.assertIsNot(link.p, self.link.p)
self.assertIsNot(link.name, None)
def test_copy_with_init_mode(self):
self.link.u.initializer = initializers.Normal(
dtype=self.link.u.initializer.dtype)
self.link.u.initialize((2, 3))
link = self.link.copy(mode='init')
self.assertFalse(numpy.array_equal(self.link.u.array, link.u.array))
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIsNot(link.x.array, self.link.x.array)
self.assertIsNot(link.y, self.link.y)
self.assertIsNot(link.y.array, self.link.y.array)
self.assertIsNot(link.p, self.link.p)
self.assertIsNot(link.name, None)
@attr.gpu
def test_copy_and_to_gpu_init(self):
cupy = cuda.cupy
l0 = self.link
l1 = l0.copy()
self.assertIs(l0.x.data, l1.x.data)
with testing.assert_warns(DeprecationWarning):
l1.to_gpu()
self.assertIsNot(l0.x.data, l1.x.data)
self.assertIsInstance(l0.x.data, numpy.ndarray)
self.assertIsInstance(l1.x.data, cupy.ndarray)
@attr.gpu
def test_copy_and_to_gpu_uninit(self):
cupy = cuda.cupy
l0 = self.link
l1 = l0.copy()
self.assertIs(l0.device.xp, numpy)
self.assertIsNone(l0.u.data)
self.assertIsNone(l1.u.data)
with testing.assert_warns(DeprecationWarning):
l1.to_gpu()
self.assertIs(l0.device.xp, numpy)
self.assertIsNone(l0.u.data)
l1.u.initialize((2, 3))
self.assertIsNone(l0.u.data)
self.assertIsInstance(l1.u.data, cupy.ndarray)
@attr.multi_gpu(2)
def test_copy_and_to_gpu_uninit_multi_gpu(self):
cupy = cuda.cupy
l0 = self.link
l1 = l0.copy()
l2 = l0.copy()
self.assertIsNone(l0.u.data)
self.assertIsNone(l1.u.data)
self.assertIsNone(l2.u.data)
with testing.assert_warns(DeprecationWarning):
l1.to_gpu()
l1.u.initialize((2, 3))
with testing.assert_warns(DeprecationWarning):
l2.to_gpu()
l2.u.initialize((2, 3))
self.assertIsNone(l0.u.data)
self.assertIsInstance(l1.u.data, cupy.ndarray)
self.assertIsInstance(l2.u.data, cupy.ndarray)
self.assertNotEqual(l1.u.data.data, l2.u.data.data)
def _check_deepcopy(self, link):
self.assertIsInstance(link._params, set)
self.assertIsInstance(link._persistent, set)
self.assertTrue(hasattr(link, 'x'))
self.assertTrue(hasattr(link, 'y'))
self.assertTrue(hasattr(link, 'u'))
self.assertTrue(hasattr(link, 'p'))
self.assertIsNot(link.x, self.link.x)
self.assertIsNot(link.x.data, self.link.x.data)
numpy.testing.assert_array_equal(cuda.to_cpu(link.x.data),
cuda.to_cpu(self.link.x.data))
self.assertIsNot(link.y, self.link.y)
self.assertIsNot(link.y.data, self.link.y.data)
numpy.testing.assert_array_equal(cuda.to_cpu(link.y.data),
cuda.to_cpu(self.link.y.data))
self.assertIsNone(link.u.data)
self.assertIsNot(link.p, self.link.p)
self.assertEqual(link.name, self.link.name)
def test_deepcopy(self):
link = copy.deepcopy(self.link)
self._check_deepcopy(link)
self.assertEqual(link.device.xp, numpy)
@attr.multi_gpu(2)
def test_deepcopy_multi_device(self):
device_id = 1
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu(device_id)
link = copy.deepcopy(self.link)
self._check_deepcopy(link)
self.assertEqual(link.device.device, cuda.Device(device_id))
self.assertEqual(link.x.data.device.id, device_id)
self.assertEqual(link.y.data.device.id, device_id)
def test_to_cpu_on_cpu(self):
x = self.link.x.data
gx = self.link.x.grad
y = self.link.y.data
gy = self.link.y.grad
p = self.link.p
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.assertIs(self.link.x.data, x)
self.assertIs(self.link.x.grad, gx)
self.assertIs(self.link.y.data, y)
self.assertIs(self.link.y.grad, gy)
self.assertIsNone(self.link.u.data)
u = self.link.u
with pytest.raises(RuntimeError):
u.grad
self.assertIs(self.link.p, p)
@attr.gpu
def test_to_cpu(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
with testing.assert_warns(DeprecationWarning):
self.link.to_cpu()
self.link.v.initialize((2, 3))
self.assertIs(self.link.xp, numpy)
self.assertIsInstance(self.link.x.data, numpy.ndarray)
self.assertIsInstance(self.link.x.grad, numpy.ndarray)
self.assertIsInstance(self.link.y.data, numpy.ndarray)
self.assertIsInstance(self.link.y.grad, numpy.ndarray)
self.assertIsNone(self.link.u.data)
u = self.link.u
with pytest.raises(RuntimeError):
u.grad
self.assertIsInstance(self.link.v.data, numpy.ndarray)
self.assertIsInstance(self.link.v.grad, numpy.ndarray)
self.assertIsInstance(self.link.p, numpy.ndarray)
@attr.gpu
def test_to_gpu(self):
cupy = cuda.cupy
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.link.v.initialize((2, 3))
self.assertIs(self.link.xp, cupy)
self.assertIsInstance(self.link.x.data, cupy.ndarray)
self.assertIsInstance(self.link.x.grad, cupy.ndarray)
self.assertIsInstance(self.link.y.data, cupy.ndarray)
self.assertIsInstance(self.link.y.grad, cupy.ndarray)
self.assertIsNone(self.link.u.data)
u = self.link.u
with pytest.raises(RuntimeError):
u.grad
self.assertIsInstance(self.link.v.data, cupy.ndarray)
self.assertIsInstance(self.link.v.grad, cupy.ndarray)
self.assertIsInstance(self.link.p, cupy.ndarray)
@attr.multi_gpu(2)
def test_to_gpu_different_current_device(self):
cuda.Device(1).use()
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu(0)
self.assertEqual(self.link.device.device, cuda.Device(0))
@attr.multi_gpu(2)
def test_to_gpu_different_device(self):
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu(0)
self.assertEqual(self.link.device.device, cuda.Device(0))
self.assertEqual(self.link.x.data.device, cuda.Device(0))
self.assertEqual(self.link.x.grad.device, cuda.Device(0))
self.assertEqual(self.link.y.data.device, cuda.Device(0))
self.assertEqual(self.link.y.grad.device, cuda.Device(0))
self.assertEqual(self.link.u.device.device, cuda.Device(0))
self.assertEqual(self.link.v.device.device, cuda.Device(0))
self.assertEqual(self.link.p.device, cuda.Device(0))
with testing.assert_warns(RuntimeWarning):
self.link.to_gpu(1)
self.assertEqual(self.link.device.device, cuda.Device(1))
self.assertEqual(self.link.x.data.device, cuda.Device(0))
self.assertEqual(self.link.x.grad.device, cuda.Device(0))
self.assertEqual(self.link.y.data.device, cuda.Device(0))
self.assertEqual(self.link.y.grad.device, cuda.Device(0))
self.assertEqual(self.link.u.device.device, cuda.Device(0))
self.assertEqual(self.link.v.device.device, cuda.Device(0))
self.assertEqual(self.link.p.device, cuda.Device(0))
@attr.multi_gpu(2)
def test_to_gpu_current_device(self):
cuda.Device(1).use()
with testing.assert_warns(DeprecationWarning):
self.link.to_gpu()
self.assertEqual(self.link.device.device, cuda.Device(1))
def test_params(self):
params = list(self.link.params())
self.assertEqual([id(p) for p in params],
[id(self.link.u), id(self.link.v),
id(self.link.x), id(self.link.y)])
def test_params_skip_uninit(self):
params = list(self.link.params(include_uninit=False))
self.assertEqual([id(p) for p in params],
[id(self.link.x), id(self.link.y)])
def test_namedparams(self):
namedparams = list(self.link.namedparams())
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/u', id(self.link.u)), ('/v', id(self.link.v)),
('/x', id(self.link.x)), ('/y', id(self.link.y))])
def test_namedparams_skip_uninit(self):
namedparams = list(self.link.namedparams(include_uninit=False))
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/x', id(self.link.x)), ('/y', id(self.link.y))])
def test_links(self):
links = list(self.link.links())
self.assertIs(links[0], self.link)
def test_links_skipself(self):
links = list(self.link.links(skipself=True))
self.assertFalse(links) # empty
def test_namedlinks(self):
pl = list(self.link.namedlinks())
self.assertEqual(len(pl), 1)
self.assertEqual(pl[0][0], '/')
self.assertIs(pl[0][1], self.link)
def _setup_test_copyparams(self):
self.link.x.grad.fill(0)
self.link.y.grad.fill(1)
self.link.u.initialize((2, 3))
self.link.u.data.fill(0)
self.link.u.grad.fill(1)
self.link.v.cleargrad()
gx = self.link.x.grad.copy()
gy = self.link.y.grad.copy()
gu = self.link.u.grad.copy()
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter(shape=(2, 3))
l.y = chainer.Parameter(shape=2)
l.u = chainer.Parameter(shape=(2, 3))
l.v = chainer.Parameter(shape=(3, 2))
l.x.data.fill(2)
l.x.grad.fill(3)
l.y.data.fill(4)
l.y.grad.fill(5)
l.u.data.fill(6)
l.u.grad.fill(7)
l.v.data.fill(8)
l.v.grad.fill(9)
l.add_persistent('p', numpy.full_like(self.link.p, 10))
return l, (gx, gy, gu)
def _check_copyparams(self, l, gs):
gx, gy, gu = gs
numpy.testing.assert_array_equal(self.link.x.data, l.x.data)
numpy.testing.assert_array_equal(self.link.x.grad, gx)
numpy.testing.assert_array_equal(self.link.y.data, l.y.data)
numpy.testing.assert_array_equal(self.link.y.grad, gy)
numpy.testing.assert_array_equal(self.link.u.data, l.u.data)
numpy.testing.assert_array_equal(self.link.u.grad, gu)
numpy.testing.assert_array_equal(self.link.v.data, l.v.data)
numpy.testing.assert_array_equal(self.link.v.grad, None)
def test_copyparams(self):
l, gs = self._setup_test_copyparams()
self.link.copyparams(l)
self._check_copyparams(l, gs)
numpy.testing.assert_array_equal(self.link.p, l.p)
def test_copyparams_no_copy_persistent(self):
orig_p = self.link.p.copy()
l, gs = self._setup_test_copyparams()
numpy.testing.assert_array_equal(False, orig_p == l.p)
self.link.copyparams(l, copy_persistent=False)
self._check_copyparams(l, gs)
numpy.testing.assert_array_equal(self.link.p, orig_p)
def test_cleargrads(self):
self.link.cleargrads()
self.assertIsNone(self.link.x.grad)
self.assertIsNone(self.link.y.grad)
self.link.u.initialize((2, 3))
self.link.v.initialize((2, 3))
self.assertIsNone(self.link.u.grad)
self.assertIsNone(self.link.v.grad)
def test_zerograds(self):
gx_expect = numpy.zeros_like(self.link.x.data)
gy_expect = numpy.zeros_like(self.link.y.data)
with testing.assert_warns(DeprecationWarning):
self.link.zerograds()
numpy.testing.assert_array_equal(self.link.x.grad, gx_expect)
numpy.testing.assert_array_equal(self.link.y.grad, gy_expect)
self.link.u.initialize((2, 3))
self.link.v.initialize((2, 3))
gu_expect = numpy.zeros_like(self.link.u.data)
gv_expect = numpy.zeros_like(self.link.v.data)
numpy.testing.assert_array_equal(self.link.u.grad, gu_expect)
numpy.testing.assert_array_equal(self.link.v.grad, gv_expect)
def test_addgrads(self):
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter(shape=(2, 3),
initializer=initializers.NaN('d'))
l.y = chainer.Parameter(shape=2)
l.u = chainer.Parameter(shape=(2, 3),
initializer=initializers.NaN('d'))
l.v = chainer.Parameter()
l.x.grad.fill(1)
l.y.grad.fill(2)
l.u.grad.fill(3)
# TODO(niboshi): Remove this line after #7140
l.v.cleargrad()
self.link.x.grad.fill(-1)
self.link.y.grad.fill(-2)
self.link.u.cleargrad()
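        # x and y grads are the negatives of l's and should cancel to zero;
        # u's grad was cleared, so it should equal l.u's grad; v remains
        # uninitialized, so accessing its grad raises RuntimeError.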
self.link.addgrads(l)
gx_expect = numpy.zeros_like(l.x.grad)
gy_expect = numpy.zeros_like(l.y.grad)
gu_expect = l.u.grad
numpy.testing.assert_array_equal(self.link.x.grad, gx_expect)
numpy.testing.assert_array_equal(self.link.y.grad, gy_expect)
numpy.testing.assert_array_equal(self.link.u.grad, gu_expect)
v = self.link.v
with pytest.raises(RuntimeError):
v.grad
def test_enable_update(self):
self.link.enable_update()
self.assertTrue(self.link.x.update_rule.enabled)
self.assertTrue(self.link.u.update_rule.enabled)
def test_disable_update(self):
self.link.disable_update()
self.assertFalse(self.link.x.update_rule.enabled)
self.assertFalse(self.link.u.update_rule.enabled)
def test_update_enabled(self):
self.assertTrue(self.link.update_enabled)
self.link.disable_update()
self.assertFalse(self.link.update_enabled)
self.link.enable_update()
self.assertTrue(self.link.update_enabled)
def test_count_params(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert self.link.count_params() == 8
assert len(w) == 2
assert w[0].category is UserWarning
self.link.u.initialize((2, 3))
self.link.v.initialize((2, 3))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.link.count_params()
assert not w
@_inject_backend_tests
@testing.parameterize_pytest(
'shape_x,shape_y', [
((2, 3), 2),
((2, 3), ()),
((2, 3), (1, 0, 3)),
]
)
class TestLinkSerializeDeserialize(unittest.TestCase):
def test_serialize(self, backend_config):
call_record = []
def serializer(key, value):
call_record.append((key, value))
return value
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter(shape=self.shape_x)
l.y = chainer.Parameter(shape=self.shape_y)
l.add_persistent('z', 1)
l.to_device(backend_config.device)
old_x_data = l.x.array
old_y_data = l.y.array
old_z = l.z
l.serialize(serializer)
# Link data are not modified
self.assertIs(l.x.array, old_x_data)
self.assertIs(l.y.array, old_y_data)
self.assertEqual(l.z, old_z)
# Check inputs to the serializer
self.assertEqual(len(call_record), 3)
call_record = sorted(call_record)
self.assertEqual(call_record[0][0], 'x')
self.assertIs(call_record[0][1], l.x.array)
self.assertEqual(call_record[1][0], 'y')
self.assertIs(call_record[1][1], l.y.array)
self.assertEqual(call_record[2][0], 'z')
self.assertEqual(call_record[2][1], old_z)
def test_deserialize(self, backend_config):
call_record = []
state = {
'x': _shaped_random(self.shape_x, 'float32'),
'y': _shaped_random(self.shape_y, 'float32'),
'z': numpy.random.randn(),
}
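        # The deserializer writes the recorded state into the given array
        # in place; the scalar persistent 'z' is returned by value instead.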
def deserializer(key, value):
call_record.append((key, value))
if key == 'z':
return state[key] # scalar
value[...] = backend_config.device.send(state[key])
return value
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter(shape=self.shape_x)
l.y = chainer.Parameter(shape=self.shape_y)
l.add_persistent('z', 1)
l.to_device(backend_config.device)
old_x_data = l.x.array
old_y_data = l.y.array
old_z = l.z
l.serialize(deserializer)
# Check link data
self.assertIs(l.x.array, old_x_data)
self.assertIs(l.y.array, old_y_data)
_assert_arrays_equal(l.x.array, state['x'])
_assert_arrays_equal(l.y.array, state['y'])
self.assertEqual(l.z, state['z'])
# Check inputs to the deserializer
self.assertEqual(len(call_record), 3)
call_record = sorted(call_record)
self.assertEqual(call_record[0][0], 'x')
self.assertIs(call_record[0][1], l.x.array)
self.assertEqual(call_record[1][0], 'y')
self.assertIs(call_record[1][1], l.y.array)
self.assertEqual(call_record[2][0], 'z')
self.assertEqual(call_record[2][1], old_z)
def test_deserialize_uninitialized1(self, backend_config):
# Deserializes uninitialized parameters into initialized ones.
# TODO(niboshi): Currently the existing initialized parameters are
# untouched, but maybe uninitialized state should be restored? (#7916)
call_record = []
def deserializer(key, value):
call_record.append((key, value))
return None # to be uninitialized
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter(shape=self.shape_x) # initialized
l.y = chainer.Parameter(shape=self.shape_y) # initialized
l.to_device(backend_config.device)
old_x_data = l.x.array
old_y_data = l.y.array
l.serialize(deserializer)
# Link is kept untouched
self.assertIs(l.x.array, old_x_data)
self.assertIs(l.y.array, old_y_data)
# Check inputs to the deserializer
self.assertEqual(len(call_record), 2)
call_record = sorted(call_record)
self.assertEqual(call_record[0][0], 'x')
self.assertIs(call_record[0][1], l.x.array)
self.assertEqual(call_record[1][0], 'y')
def test_deserialize_uninitialized2(self, backend_config):
# Deserializes initialized parameters into uninitialized ones.
call_record = []
state = {
'x': _shaped_random(self.shape_x, 'float32'),
'y': _shaped_random(self.shape_y, 'float32'),
'z': numpy.random.randn(),
}
def deserializer(key, value):
call_record.append((key, value))
# to be initialized
return backend_config.device.send(state[key])
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter() # uninitialized
l.y = chainer.Parameter() # uninitialized
l.to_device(backend_config.device)
l.serialize(deserializer)
# Link is initialized
self.assertIsNotNone(l.x.array)
self.assertIsNotNone(l.y.array)
_assert_arrays_equal(l.x.array, state['x'])
_assert_arrays_equal(l.y.array, state['y'])
# Check inputs to the deserializer
self.assertEqual(len(call_record), 2)
call_record = sorted(call_record)
self.assertEqual(call_record[0][0], 'x')
self.assertIs(call_record[0][1], None)
self.assertEqual(call_record[1][0], 'y')
self.assertIs(call_record[1][1], None)
@_inject_backend_tests
class TestLinkSerializeDeserializedUninitializedParameter(unittest.TestCase):
def test_serialize(self, backend_config):
call_record = []
def serializer(key, value):
call_record.append((key, value))
return value
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter() # uninitialized
l.to_device(backend_config.device)
l.serialize(serializer)
# Link is kept uninitialized
self.assertIsNone(l.x.array)
# Check inputs to the serializer
self.assertEqual(len(call_record), 1)
self.assertEqual(call_record[0][0], 'x')
self.assertIs(call_record[0][1], None)
def test_deserialize(self, backend_config):
        # Deserializes uninitialized parameters into uninitialized ones.
call_record = []
def serializer(key, value):
call_record.append((key, value))
return None # to be uninitialized
l = chainer.Link()
with l.init_scope():
l.x = chainer.Parameter() # uninitialized
l.to_device(backend_config.device)
l.serialize(serializer)
# Link is kept uninitialized
self.assertIsNone(l.x.array)
# Check inputs to the serializer
self.assertEqual(len(call_record), 1)
self.assertEqual(call_record[0][0], 'x')
self.assertIs(call_record[0][1], None)
@_inject_backend_tests_no_intel64
@attr.chainerx
class TestLinkFromToChainerx(LinkTestBase, unittest.TestCase):
def test_from_chx(self, backend_config):
self.link.to_device(backend_config.device)
self.link.from_chx()
source_device = backend_config.device
self.check_param_init('x', (2, 3), 'd')
self.check_param_init('y', (2,), 'f')
self.check_param_uninit('u')
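        # After from_chx(), a ChainerX device should fall back to the
        # corresponding non-ChainerX device: native -> CPU, cuda -> the GPU
        # with the same index.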
if source_device.xp is chainerx:
backend_name = source_device.device.backend.name
if backend_name == 'native':
expected_device = backend.CpuDevice()
elif backend_name == 'cuda':
expected_device = backend.GpuDevice.from_device_id(
source_device.device.index)
else:
expected_device = source_device
self.assertEqual(self.link.device, expected_device)
def test_to_chx(self, backend_config):
self.link.to_device(backend_config.device)
self.link.to_chx()
source_device = backend_config.device
self.check_param_init('x', (2, 3), 'd')
self.check_param_init('y', (2,), 'f')
self.check_param_uninit('u')
if source_device.xp is chainerx:
expected_device = source_device
elif source_device.xp is numpy:
expected_device = backend.ChainerxDevice(
chainerx.get_device('native', 0))
elif source_device.xp is cuda.cupy:
expected_device = backend.ChainerxDevice(
chainerx.get_device('cuda', source_device.device.id))
else:
assert False
self.assertEqual(self.link.device, expected_device)
class TestLinkMissingInitCall(unittest.TestCase):
# Tests for detecting incorrectly written Link subclasses in which
# the call to Link.__init__ is missing
expected_message = r'^Link\.__init__\(\) has not been called\.$'
def test_missing1(self):
# Nothing is done in __init__.
# The fault should be detected no later than __call__().
class Derived(chainer.Link):
def __init__(self):
pass
def forward(self, x):
return x
with pytest.raises(RuntimeError, match=self.expected_message):
link = Derived()
link(numpy.array([1, 2], numpy.float32))
def test_missing2(self):
# init_scope is called.
# The fault should be detected at init_scope.
class Derived(chainer.Link):
def __init__(self):
with self.init_scope():
pass
with pytest.raises(RuntimeError, match=self.expected_message):
Derived()
def test_missing3(self):
# add_param is called.
# The fault should be detected at add_param.
class Derived(chainer.Link):
def __init__(self):
self.add_param('p1', (2, 3), numpy.float32)
with pytest.raises(RuntimeError, match=self.expected_message):
Derived()
class TestLinkRepeat(unittest.TestCase):
def setUp(self):
class Layer(chainer.Link):
def __init__(self):
super(Layer, self).__init__()
with self.init_scope():
self.x = chainer.Parameter(
chainer.initializers.Normal(), shape=(2, 3))
def forward(self):
pass
self.link = Layer()
def test_no_repeat(self):
ret = self.link.repeat(0)
self.assertEqual(len(ret), 0)
def test_repeat_with_init(self):
ret = self.link.repeat(2, mode='init')
self.assertEqual(len(ret), 2)
# Both should be different objects from the original link
self.assertIsNot(ret[0], self.link)
self.assertIsNot(ret[1], self.link)
# Object IDs of elements should be different
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].x, ret[1].x)
        # But the shape and dtype of the parameters should be the same
self.assertEqual(ret[0].x.shape, self.link.x.shape)
self.assertEqual(ret[0].x.dtype, self.link.x.dtype)
self.assertEqual(ret[0].x.shape, ret[1].x.shape)
self.assertEqual(ret[0].x.dtype, ret[1].x.dtype)
# Parameters are re-initialized, so the values should be different
self.assertFalse(numpy.all(ret[0].x.array == ret[1].x.array))
def test_repeat_with_copy(self):
ret = self.link.repeat(2, mode='copy')
self.assertEqual(len(ret), 2)
# Both should be different objects from the original link
self.assertIsNot(ret[0], self.link)
self.assertIsNot(ret[1], self.link)
# Object IDs of elements should be different
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].x, ret[1].x)
        # But the shape, dtype, and values of the parameters should be the same
self.assertEqual(ret[0].x.shape, self.link.x.shape)
self.assertEqual(ret[0].x.dtype, self.link.x.dtype)
self.assertEqual(ret[0].x.shape, ret[1].x.shape)
self.assertEqual(ret[0].x.dtype, ret[1].x.dtype)
numpy.testing.assert_array_equal(ret[0].x.array, ret[1].x.array)
def test_repeat_with_share(self):
ret = self.link.repeat(2, mode='share')
self.assertEqual(len(ret), 2)
# Both should be different objects from the original link
self.assertIsNot(ret[0], self.link)
self.assertIsNot(ret[1], self.link)
# Object IDs of elements should be different
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].x, ret[1].x)
# But the array objects should be the same
self.assertIs(ret[0].x.array, ret[1].x.array)
        # But shape, type, and value of parameters should be the same
self.assertEqual(ret[0].x.shape, self.link.x.shape)
self.assertEqual(ret[0].x.dtype, self.link.x.dtype)
self.assertEqual(ret[0].x.shape, ret[1].x.shape)
self.assertEqual(ret[0].x.dtype, ret[1].x.dtype)
numpy.testing.assert_array_equal(ret[0].x.array, ret[1].x.array)
class CountParameter(chainer.Parameter):
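    # Parameter subclass that counts how many times zerograd() is called.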
def __init__(self, v):
super(CountParameter, self).__init__(v.data, name=v.name)
self.data = v.data
self.grad = v.grad if v.data is not None else None
self.count_zerograd = 0
def zerograd(self):
self.count_zerograd += 1
super(CountParameter, self).zerograd()
class ChainTestBase(object):
def setUp(self):
# Schematic:
# c2
# - c1
# - l1 (x: uninitialized with shape=(2, 3))
# - l2 (x: uninitialized with shape=2)
# - l3 (x: uninitialized without shape)
        # - (x: uninitialized with shape=2)
self.l1 = chainer.Link()
with self.l1.init_scope():
self.l1.x = chainer.Parameter(shape=(2, 3))
self.l2 = chainer.Link()
with self.l2.init_scope():
self.l2.x = chainer.Parameter(shape=2)
self.l3 = chainer.Link()
with self.l3.init_scope():
self.l3.x = chainer.Parameter()
self.c1 = chainer.Chain()
with self.c1.init_scope():
self.c1.l1 = self.l1
self.c1.add_link('l2', self.l2)
self.c2 = chainer.Chain()
with self.c2.init_scope():
self.c2.c1 = self.c1
self.c2.l3 = self.l3
self.x = chainer.Parameter(shape=2)
self.c2.x = self.x
def set_count_parameters(self):
self.l1.x = CountParameter(self.l1.x)
self.l2.x = CountParameter(self.l2.x)
self.l3.x = CountParameter(self.l3.x)
class TestChain(ChainTestBase, unittest.TestCase):
def test_init(self):
self.assertIs(self.c1.l1, self.l1)
self.assertIs(self.c1['l1'], self.l1)
self.assertEqual(self.l1.name, 'l1')
self.assertIs(self.c2.c1, self.c1)
self.assertIs(self.c2['c1'], self.c1)
self.assertEqual(self.c1.name, 'c1')
self.assertIs(self.c2.l3, self.l3)
self.assertIs(self.c2['l3'], self.l3)
self.assertEqual(self.l3.name, 'l3')
def test_str(self):
self.assertEqual(str(chainer.Chain()), 'Chain()')
self.assertEqual(
str(self.c2),
'''\
Chain(
(c1): Chain(
(l1): Link(),
(l2): Link(),
),
(l3): Link(),
)''',
)
def test_add_link(self):
self.assertIs(self.c1.l2, self.l2)
self.assertEqual(self.l2.name, 'l2')
def test_add_link_to_existing_attribute(self):
self.l1.z = 0
with self.assertRaises(AttributeError):
self.l1.add_link('z', chainer.Link())
def test_assign_link_outside_of_init_scope(self):
l = chainer.Link()
self.l1.l = l
self.assertTrue(all(l is not link for link in self.l1.links()))
def test_delete_link(self):
del self.c1.l1
self.assertFalse(hasattr(self.c1, 'l1'))
self.assertNotIn('l1', self.c1._children)
def test_copy_with_share_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
self.x.initializer = initializers.Normal(
dtype=self.x.initializer.dtype)
self.x.initialize(self.x.shape)
c2 = self.c2.copy(mode='share')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, set)
self.assertTrue(hasattr(c2, 'x'))
self.assertIsNot(c2.x, self.x)
self.assertIs(c2.x.data, self.x.data)
self.assertTrue(hasattr(c2, 'c1'))
self.assertEqual(c2.c1.name, 'c1')
self.assertIsInstance(c2.c1._children, set)
self.assertIsNot(c2.c1, self.c1)
self.assertEqual(c2.c1.l1.name, 'l1')
self.assertIsNot(c2.c1.l1, self.l1)
self.assertIsNot(c2.c1.l1.x, self.l1.x)
self.assertIs(c2.c1.l1.x.data, self.l1.x.data)
self.assertIs(c2.c1.l1.x.grad, None)
self.assertTrue(hasattr(c2.c1, 'l2'))
self.assertEqual(c2.c1.l2.name, 'l2')
self.assertIsNot(c2.c1.l2, self.l2)
self.assertIsNot(c2.c1.l2.x, self.l2.x)
self.assertIs(c2.c1.l2.x.data, self.l2.x.data)
self.assertIs(c2.c1.l2.x.grad, None)
self.assertTrue(hasattr(c2, 'l3'))
self.assertEqual(c2.l3.name, 'l3')
self.assertIsNot(c2.l3, self.l3)
self.assertIsNot(c2.l3.x, self.l3.x)
self.assertIs(c2.l3.x.data, self.l3.x.data)
self.assertIs(c2.l3.x.grad, None)
def test_copy_with_copy_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
self.x.initializer = initializers.Normal(
dtype=self.x.initializer.dtype)
self.x.initialize(self.x.shape)
c2 = self.c2.copy(mode='copy')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, set)
self.assertTrue(hasattr(c2, 'x'))
self.assertIsNot(c2.x, self.x)
self.assertIsNot(c2.x.data, self.x.data)
self.assertTrue(numpy.array_equal(c2.x.data, self.x.data))
self.assertTrue(hasattr(c2, 'c1'))
self.assertEqual(c2.c1.name, 'c1')
self.assertIsInstance(c2.c1._children, set)
self.assertIsNot(c2.c1, self.c1)
self.assertEqual(c2.c1.l1.name, 'l1')
self.assertIsNot(c2.c1.l1, self.l1)
self.assertIsNot(c2.c1.l1.x, self.l1.x)
self.assertIsNot(c2.c1.l1.x.data, self.l1.x.data)
self.assertTrue(numpy.array_equal(c2.c1.l1.x.data, self.l1.x.data))
self.assertIs(c2.c1.l1.x.grad, None)
self.assertTrue(hasattr(c2.c1, 'l2'))
self.assertEqual(c2.c1.l2.name, 'l2')
self.assertIsNot(c2.c1.l2, self.l2)
self.assertIsNot(c2.c1.l2.x, self.l2.x)
self.assertIsNot(c2.c1.l2.x.data, self.l2.x.data)
self.assertTrue(numpy.array_equal(c2.c1.l2.x.data, self.l2.x.data))
self.assertIs(c2.c1.l2.x.grad, None)
self.assertTrue(hasattr(c2, 'l3'))
self.assertEqual(c2.l3.name, 'l3')
self.assertIsNot(c2.l3, self.l3)
self.assertIsNot(c2.l3.x, self.l3.x)
self.assertIs(c2.l3.x.data, self.l3.x.data)
x = c2.l3.x
with pytest.raises(RuntimeError):
x.grad
def test_copy_with_init_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
self.x.initializer = initializers.Normal(
dtype=self.x.initializer.dtype)
self.c2.x.initialize(self.x.shape)
c2 = self.c2.copy(mode='init')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, set)
self.assertTrue(hasattr(c2, 'x'))
self.assertIsNot(c2.x, self.x)
self.assertIsNot(c2.x.data, self.x.data)
self.assertFalse(numpy.array_equal(c2.x.data, self.x.data))
# _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2.x.grad).all())
self.assertTrue(hasattr(c2, 'c1'))
self.assertEqual(c2.c1.name, 'c1')
self.assertIsInstance(c2.c1._children, set)
self.assertIsNot(c2.c1, self.c1)
self.assertEqual(c2.c1.l1.name, 'l1')
self.assertIsNot(c2.c1.l1, self.l1)
self.assertIsNot(c2.c1.l1.x, self.l1.x)
self.assertIsNot(c2.c1.l1.x.data, self.l1.x.data)
self.assertFalse(numpy.array_equal(c2.c1.l1.x.data, self.l1.x.data))
# _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2.c1.l1.x.grad).all())
self.assertTrue(hasattr(c2.c1, 'l2'))
self.assertEqual(c2.c1.l2.name, 'l2')
self.assertIsNot(c2.c1.l2, self.l2)
self.assertIsNot(c2.c1.l2.x, self.l2.x)
self.assertIsNot(c2.c1.l2.x.data, self.l2.x.data)
self.assertFalse(numpy.array_equal(c2.c1.l2.x.data, self.l2.x.data))
# _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2.c1.l2.x.grad).all())
self.assertTrue(hasattr(c2, 'l3'))
self.assertEqual(c2.l3.name, 'l3')
self.assertIsNot(c2.l3, self.l3)
self.assertIsNot(c2.l3.x, self.l3.x)
self.assertIs(c2.l3.x.data, self.l3.x.data)
        # A Parameter that has not been initialized has invalid grad
with pytest.raises(RuntimeError):
c2.l3.x.grad
def test_to_cpu_on_cpu(self):
x1 = self.l1.x.data
gx1 = self.l1.x.grad
x2 = self.l2.x.data
gx2 = self.l2.x.grad
x3 = self.l3.x.data
with testing.assert_warns(DeprecationWarning):
self.c2.to_cpu()
self.assertIs(self.l1.x.data, x1)
self.assertIs(self.l1.x.grad, gx1)
self.assertIs(self.l2.x.data, x2)
self.assertIs(self.l2.x.grad, gx2)
self.assertIs(self.l3.x.data, x3)
with pytest.raises(RuntimeError):
self.l3.x.grad
@attr.gpu
def test_to_cpu(self):
self.set_count_parameters()
with testing.assert_warns(DeprecationWarning):
self.c2.to_gpu()
with testing.assert_warns(DeprecationWarning):
self.c2.to_cpu()
self.assertIs(self.c2.xp, numpy)
self.assertIs(self.c1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.x.data, numpy.ndarray)
self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
self.assertIsInstance(self.l2.x.data, numpy.ndarray)
self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
self.assertIsNone(self.l3.x.data)
self.assertIsNone(self.l3.x.grad)
self.l3.x.initialize(3)
self.assertIsInstance(self.l3.x.data, numpy.ndarray)
self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
@attr.gpu
def test_to_gpu(self):
self.set_count_parameters()
cupy = cuda.cupy
with testing.assert_warns(DeprecationWarning):
self.c2.to_gpu()
self.assertIs(self.c2.xp, cupy)
self.assertIs(self.c1.xp, cupy)
self.assertIs(self.l1.xp, cupy)
self.assertIs(self.l2.xp, cupy)
self.assertIs(self.l3.xp, cupy)
self.assertIsInstance(self.l1.x.data, cupy.ndarray)
self.assertIsInstance(self.l1.x.grad, cupy.ndarray)
self.assertIsInstance(self.l2.x.data, cupy.ndarray)
self.assertIsInstance(self.l2.x.grad, cupy.ndarray)
self.assertIsNone(self.l3.x.data)
self.assertIsNone(self.l3.x.grad)
self.l3.x.initialize(3)
self.assertIsInstance(self.l3.x.data, cupy.ndarray)
self.assertIsInstance(self.l3.x.grad, cupy.ndarray)
def test_to_device(self):
self.set_count_parameters()
device = backend.CpuDevice()
self.c2.to_device(device)
self.assertIs(self.c2.xp, numpy)
self.assertIs(self.c1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.x.data, numpy.ndarray)
self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
self.assertIsInstance(self.l2.x.data, numpy.ndarray)
self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
self.assertIsNone(self.l3.x.data)
self.l3.x.initialize((3,))
self.assertIsInstance(self.l3.x.data, numpy.ndarray)
self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
def test_params(self):
params = list(self.c2.params())
self.assertEqual([id(p) for p in params],
[id(self.x), id(self.l1.x),
id(self.l2.x), id(self.l3.x)])
def test_params_skip_uninit(self):
params = list(self.c2.params(include_uninit=False))
self.assertEqual([id(p) for p in params],
[id(self.x), id(self.l1.x), id(self.l2.x)])
def test_namedparams(self):
namedparams = list(self.c2.namedparams())
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/x', id(self.x)),
('/c1/l1/x', id(self.l1.x)),
('/c1/l2/x', id(self.l2.x)),
('/l3/x', id(self.l3.x))])
def test_namedparams_skip_uninit(self):
namedparams = list(self.c2.namedparams(include_uninit=False))
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/x', id(self.x)),
('/c1/l1/x', id(self.l1.x)),
('/c1/l2/x', id(self.l2.x))])
def test_links(self):
links = list(self.c2.links())
self.assertEqual([id(l) for l in links],
[id(l) for l in [self.c2,
self.c1, self.l1, self.l2,
self.l3]])
def test_links_skipself(self):
links = list(self.c2.links(skipself=True))
self.assertEqual([id(l) for l in links],
[id(l) for l in [self.c1, self.l1, self.l2,
self.l3]])
def test_namedlinks(self):
namedlinks = list(self.c2.namedlinks())
self.assertEqual([(name, id(l)) for name, l in namedlinks],
[('/', id(self.c2)),
('/c1', id(self.c1)),
('/c1/l1', id(self.l1)),
('/c1/l2', id(self.l2)),
('/l3', id(self.l3))])
def test_namedlinks_skipself(self):
namedlinks = list(self.c2.namedlinks(skipself=True))
self.assertEqual([(name, id(l)) for name, l in namedlinks],
[('/c1', id(self.c1)),
('/c1/l1', id(self.l1)),
('/c1/l2', id(self.l2)),
('/l3', id(self.l3))])
def test_children(self):
children = list(self.c2.children())
self.assertEqual([id(c) for c in children], [id(self.c1), id(self.l3)])
def test_copyparams(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.Chain()
with c1.init_scope():
c1.l1 = l1
c1.l2 = l2
c2 = chainer.Chain()
with c2.init_scope():
c2.c1 = c1
c2.l3 = l3
c2.x = chainer.Parameter(shape=2)
l1.x.data.fill(0)
l2.x.data.fill(1)
l3.x.data.fill(2)
c2.x.data.fill(3)
self.c2.copyparams(c2)
numpy.testing.assert_array_equal(self.l1.x.data, l1.x.data)
numpy.testing.assert_array_equal(self.l2.x.data, l2.x.data)
numpy.testing.assert_array_equal(self.l3.x.data, l3.x.data)
numpy.testing.assert_array_equal(self.c2.x.data, c2.x.data)
def test_zerograds(self):
self.set_count_parameters()
with testing.assert_warns(DeprecationWarning):
self.c2.zerograds()
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
self.assertEqual(self.l1.x.count_zerograd, 1)
self.assertEqual(self.l2.x.count_zerograd, 1)
self.assertEqual(self.l3.x.count_zerograd, 1)
self.l3.x.initialize(3)
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
def test_addgrads(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.Chain()
with c1.init_scope():
c1.l1 = l1
c1.l2 = l2
c2 = chainer.Chain()
with c2.init_scope():
c2.c1 = c1
c2.l3 = l3
c2.x = chainer.Parameter(shape=2)
l1.x.grad.fill(1)
l2.x.grad.fill(2)
l3.x.grad.fill(3)
c2.x.grad.fill(4)
self.l1.x.grad.fill(-1)
self.l2.x.grad.fill(-2)
self.c2.x.grad.fill(-3)
self.l3.cleargrads()
self.c2.addgrads(c2)
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.full(3, 3.))
numpy.testing.assert_array_equal(self.c2.x.grad, numpy.ones(2))
def test_serialize(self):
mocks = {'l1': mock.MagicMock(), 'l2': mock.MagicMock()}
serializer = mock.MagicMock()
serializer.__getitem__.side_effect = lambda k: mocks[k]
self.c1.serialize(serializer)
self.assertEqual(serializer.call_count, 0)
self.assertEqual(serializer.__getitem__.call_count, 2)
serializer.__getitem__.assert_any_call('l1')
serializer.__getitem__.assert_any_call('l2')
mocks['l1'].assert_called_with('x', self.l1.x.data)
mocks['l2'].assert_called_with('x', self.l2.x.data)
def test_count_params(self):
assert self.c1.count_params() == 8
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert len(w) == 1
assert w[0].category is UserWarning
self.c2.l3.x.initialize((3,))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert not w
@_inject_backend_tests_no_intel64
@attr.chainerx
class TestChainFromToChainerx(ChainTestBase, unittest.TestCase):
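    # Tests for to_chx()/from_chx() device transfers over the chain
    # hierarchy defined in ChainTestBase.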
def check_array_device(self, array, expected_device):
expected_ndarray = expected_device.xp.ndarray
self.assertIsInstance(array, expected_ndarray)
if expected_device.xp in (chainerx, cuda.cupy):
assert array.device == expected_device.device
def check_expected_device(self, expected_device):
expected_xp = expected_device.xp
self.assertIs(self.c2.xp, expected_xp)
self.assertIs(self.c1.xp, expected_xp)
self.assertIs(self.l1.xp, expected_xp)
self.assertIs(self.l2.xp, expected_xp)
self.assertIs(self.l3.xp, expected_xp)
self.check_array_device(self.l1.x.data, expected_device)
self.check_array_device(self.l1.x.grad, expected_device)
self.check_array_device(self.l2.x.data, expected_device)
self.check_array_device(self.l2.x.grad, expected_device)
self.assertIsNone(self.l3.x.data)
self.l3.x.initialize((3,))
self.check_array_device(self.l3.x.data, expected_device)
self.check_array_device(self.l3.x.grad, expected_device)
def test_to_chx(self, backend_config):
self.set_count_parameters()
self.c2.to_device(backend_config.device)
self.c2.to_chx()
src_device = backend_config.device
if src_device.xp is chainerx:
expected_device = src_device
else:
expected_device = (
backend.ChainerxDevice.from_fallback_device(src_device))
self.check_expected_device(expected_device)
def test_from_chx(self, backend_config):
self.set_count_parameters()
self.c2.to_device(backend_config.device)
self.c2.from_chx()
src_device = backend_config.device
if src_device.xp is chainerx:
expected_device = src_device.fallback_device
else:
expected_device = src_device
self.check_expected_device(expected_device)
class TestChainRepeat(unittest.TestCase):
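    # Tests for Chain.repeat() with the 'init', 'copy' and 'share' modes.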
def setUp(self):
class ChainForTest(chainer.Chain):
def __init__(self):
super(ChainForTest, self).__init__()
with self.init_scope():
self.link = chainer.Link()
def forward(self):
pass
self.chain = ChainForTest()
self.link = self.chain.link
with self.link.init_scope():
self.link.x = chainer.Parameter(
chainer.initializers.Normal(), shape=(2, 3))
def test_no_repeat(self):
ret = self.chain.repeat(0)
self.assertEqual(len(ret), 0)
def test_repeat_with_share_mode(self):
ret = self.chain.repeat(2, mode='share')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chain)
self.assertIsNot(ret[1], self.chain)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].link, self.chain.link)
self.assertIsNot(ret[1].link, self.chain.link)
self.assertIsNot(ret[0].link, ret[1].link)
self.assertIsNot(ret[0].link.x, self.chain.link.x)
self.assertIsNot(ret[1].link.x, self.chain.link.x)
self.assertIsNot(ret[0].link.x, ret[1].link.x)
self.assertIs(ret[0].link.x.data, self.chain.link.x.data)
self.assertIs(ret[0].link.x.data, ret[1].link.x.data)
self.assertEqual(ret[0].link.x.shape, self.chain.link.x.shape)
self.assertEqual(ret[0].link.x.shape, ret[1].link.x.shape)
self.assertEqual(ret[0].link.x.dtype, self.chain.link.x.dtype)
self.assertEqual(ret[0].link.x.dtype, ret[1].link.x.dtype)
def test_repeat_with_copy_mode(self):
ret = self.chain.repeat(2, mode='copy')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chain)
self.assertIsNot(ret[1], self.chain)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].link, self.chain.link)
self.assertIsNot(ret[1].link, self.chain.link)
self.assertIsNot(ret[0].link, ret[1].link)
self.assertIsNot(ret[0].link.x, self.link.x)
self.assertIsNot(ret[1].link.x, self.link.x)
self.assertIsNot(ret[0].link.x, ret[1].link.x)
self.assertIsNot(ret[0].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[1].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[0].link.x.data, ret[1].link.x.data)
self.assertTrue(numpy.array_equal(
ret[0].link.x.data, self.chain.link.x.data))
self.assertTrue(numpy.array_equal(
ret[0].link.x.data, ret[1].link.x.data))
self.assertEqual(ret[0].link.x.shape, self.chain.link.x.shape)
self.assertEqual(ret[0].link.x.shape, ret[1].link.x.shape)
self.assertEqual(ret[0].link.x.dtype, self.chain.link.x.dtype)
self.assertEqual(ret[0].link.x.dtype, ret[1].link.x.dtype)
def test_repeat_with_init_mode(self):
ret = self.chain.repeat(2, mode='init')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chain)
self.assertIsNot(ret[1], self.chain)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0].link, self.chain.link)
self.assertIsNot(ret[1].link, self.chain.link)
self.assertIsNot(ret[0].link.x, ret[1].link.x)
self.assertIsNot(ret[0].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[1].link.x.data, self.chain.link.x.data)
self.assertIsNot(ret[0].link.x.data, ret[1].link.x.data)
self.assertFalse(numpy.array_equal(
ret[0].link.x.data, self.chain.link.x.data))
self.assertFalse(numpy.array_equal(
ret[1].link.x.data, self.chain.link.x.data))
self.assertFalse(numpy.array_equal(
ret[0].link.x.data, ret[1].link.x.data))
self.assertEqual(ret[0].link.x.shape, self.chain.link.x.shape)
self.assertEqual(ret[0].link.x.shape, ret[1].link.x.shape)
self.assertEqual(ret[0].link.x.dtype, self.chain.link.x.dtype)
self.assertEqual(ret[0].link.x.dtype, ret[1].link.x.dtype)
class TestChainList(unittest.TestCase):
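    # Schematic:
    #   c2 = ChainList(c1, l3), c1 = ChainList(l1, l2), c3 = ChainList(l4)
    #   l4, l5 and l6 are spare links used by the slice and iadd tests.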
def setUp(self):
self.l1 = chainer.Link()
with self.l1.init_scope():
self.l1.x = chainer.Parameter(shape=(2, 3))
self.l1.y = chainer.Parameter()
self.l2 = chainer.Link()
with self.l2.init_scope():
self.l2.x = chainer.Parameter(shape=2)
self.l3 = chainer.Link()
with self.l3.init_scope():
self.l3.x = chainer.Parameter(shape=3)
self.l4 = chainer.Link()
self.l5 = chainer.Link()
self.l6 = chainer.Link()
self.c1 = chainer.ChainList(self.l1)
self.c1.add_link(self.l2)
self.c2 = chainer.ChainList(self.c1)
self.c2.append(self.l3)
self.c3 = chainer.ChainList(self.l4)
def test_init(self):
self.assertIs(self.c1[0], self.l1)
self.assertEqual(self.l1.name, '0')
self.assertIs(self.c2[0], self.c1)
self.assertEqual(self.c1.name, '0')
def test_str(self):
self.assertEqual(str(chainer.ChainList()), 'ChainList()')
self.assertEqual(
str(self.c2),
'''\
ChainList(
(0): ChainList(
(0): Link(),
(1): Link(),
),
(1): Link(),
)''',
)
def test_add_link(self):
self.assertIs(self.c1[1], self.l2)
self.assertEqual(self.l2.name, '1')
def test_append(self):
self.assertIs(self.c2[1], self.l3)
self.assertEqual(self.l3.name, '1')
def test_setitem(self):
self.c1[1] = self.l3
self.assertEqual(self.l3.name, '1')
def test_setitem_slice(self):
self.c1.append(self.l3) # l1 l2 l3
self.c1[3:0:-1] = [self.l4, self.l5] # l1 l5 l4
self.assertEqual(len(self.c1), 3)
self.assertEqual(self.l1.name, '0')
self.assertEqual(self.l4.name, '2')
self.assertEqual(self.l5.name, '1')
def test_setitem_slice_short(self):
self.c1.append(self.l3) # l1 l2 l3
self.c1[1:3] = [self.l4] # l1 l4
self.assertEqual(len(self.c1), 2)
self.assertEqual(self.l1.name, '0')
self.assertEqual(self.l4.name, '1')
def test_setitem_slice_long(self):
self.c1.append(self.l3) # l1 l2 l3
self.c1[1:3] = [self.l4, self.l5, self.l6] # l1 l4 l5 l6
self.assertEqual(len(self.c1), 4)
self.assertEqual(self.l1.name, '0')
self.assertEqual(self.l4.name, '1')
self.assertEqual(self.l5.name, '2')
self.assertEqual(self.l6.name, '3')
def test_iadd(self):
self.c2 += self.c3
        self.assertEqual(len(self.c2), 3)
self.assertEqual(self.l4.name, '2')
def test_delete_item(self):
del self.c2[0]
self.assertEqual(len(self.c2), 1)
self.assertEqual(self.l3.name, '0')
def test_assign_param_in_init_scope(self):
p = chainer.Parameter()
with self.c1.init_scope():
self.c1.p = p
self.assertIn(p, self.c1.params())
def test_assign_link_in_init_scope(self):
l = chainer.Link()
with self.c1.init_scope():
with self.assertRaises(TypeError):
self.c1.l = l
def test_iter(self):
links = list(self.c2)
self.assertEqual(2, len(links))
self.assertIs(links[0], self.c1)
self.assertIs(links[1], self.l3)
def test_len(self):
self.assertEqual(len(self.c1), 2)
self.assertEqual(len(self.c2), 2)
def test_copy_with_share_mode(self):
c2 = self.c2.copy(mode='share')
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, list)
self.assertIsNot(c2[0], self.c1)
self.assertEqual(c2[0].name, '0')
self.assertIsInstance(c2[0]._children, list)
self.assertIsNot(c2[0][0], self.l1)
self.assertEqual(c2[0][0].name, '0')
self.assertIsNot(c2[0][0].x, self.l1.x)
self.assertIs(c2[0][0].x.data, self.l1.x.data)
self.assertIs(c2[0][0].x.grad, None)
self.assertIsNot(c2[0][1], self.l2)
self.assertEqual(c2[0][1].name, '1')
self.assertIsNot(c2[0][1].x, self.l2.x)
self.assertIs(c2[0][1].x.data, self.l2.x.data)
self.assertIs(c2[0][1].x.grad, None)
self.assertIsNot(c2[1], self.l3)
self.assertEqual(c2[1].name, '1')
self.assertIsNot(c2[1].x, self.l3.x)
self.assertIs(c2[1].x.data, self.l3.x.data)
self.assertIs(c2[1].x.grad, None)
def test_copy_with_copy_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='copy')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, list)
self.assertEqual(c2[0].name, '0')
self.assertIsInstance(c2[0]._children, list)
self.assertIsNot(c2[0][0], self.l1)
self.assertEqual(c2[0][0].name, '0')
self.assertIsNot(c2[0][0].x, self.l1.x)
self.assertIsNot(c2[0][0].x.data, self.l1.x.data)
self.assertTrue(numpy.array_equal(c2[0][0].x.data, self.l1.x.data))
self.assertIs(c2[0][0].x.grad, None)
self.assertIsNot(c2[0][1], self.l2)
self.assertEqual(c2[0][1].name, '1')
self.assertIsNot(c2[0][1].x, self.l2.x)
self.assertIsNot(c2[0][1].x.data, self.l2.x.data)
self.assertTrue(numpy.array_equal(c2[0][1].x.data, self.l2.x.data))
self.assertIs(c2[0][1].x.grad, None)
self.assertIsNot(c2[1], self.l3)
self.assertEqual(c2[1].name, '1')
self.assertIsNot(c2[1].x, self.l3.x)
self.assertIsNot(c2[1].x.data, self.l3.x.data)
# l3 is constructed with shape argument but not initialized
self.assertTrue(numpy.isnan(c2[1].x.grad).all())
def test_copy_with_init_mode(self):
self.l1.x.initializer = initializers.Normal(
dtype=self.l1.x.initializer.dtype)
self.l1.x.initialize(self.l1.x.shape)
self.l2.x.initializer = initializers.Normal(
dtype=self.l2.x.initializer.dtype)
self.l2.x.initialize(self.l2.x.shape)
c2 = self.c2.copy(mode='init')
self.assertIs(c2.name, None)
self.assertIsInstance(c2._children, list)
self.assertEqual(c2[0].name, '0')
self.assertIsInstance(c2[0]._children, list)
self.assertIsNot(c2[0][0], self.l1)
self.assertEqual(c2[0][0].name, '0')
self.assertIsNot(c2[0][0].x, self.l1.x)
self.assertIsNot(c2[0][0].x.data, self.l1.x.data)
self.assertFalse(numpy.array_equal(c2[0][0].x.data, self.l1.x.data))
# _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2[0][0].x.grad).all())
self.assertIsNot(c2[0][1], self.l2)
self.assertEqual(c2[0][1].name, '1')
self.assertIsNot(c2[0][1].x, self.l2.x)
self.assertIsNot(c2[0][1].x.data, self.l2.x.data)
self.assertFalse(numpy.array_equal(c2[0][1].x.data, self.l2.x.data))
# _grad_initializer attribute in a copied Parameter has constant.NaN
        # after calling initialize() method
self.assertTrue(numpy.isnan(c2[0][1].x.grad).all())
self.assertIsNot(c2[1], self.l3)
self.assertEqual(c2[1].name, '1')
self.assertIsNot(c2[1].x, self.l3.x)
self.assertTrue(numpy.isnan(c2[1].x.data).all())
self.assertTrue(numpy.isnan(c2[1].x.grad).all())
@attr.gpu
def test_copy_and_send_to_gpu(self):
c2 = self.c2.copy()
with testing.assert_warns(DeprecationWarning):
self.c2.to_gpu()
self.assertIsInstance(self.c2[0][0].x.data, cuda.cupy.ndarray)
self.assertIsInstance(self.c2[0][1].x.data, cuda.cupy.ndarray)
self.assertIsInstance(c2[0][0].x.data, numpy.ndarray)
self.assertIsInstance(c2[0][1].x.data, numpy.ndarray)
@attr.gpu
def test_copy_and_send_to_gpu_2(self):
c2 = self.c2.copy()
with testing.assert_warns(DeprecationWarning):
c2.to_gpu()
self.assertIsInstance(self.c2[0][0].x.data, numpy.ndarray)
self.assertIsInstance(self.c2[0][1].x.data, numpy.ndarray)
self.assertIsInstance(c2[0][0].x.data, cuda.cupy.ndarray)
self.assertIsInstance(c2[0][1].x.data, cuda.cupy.ndarray)
@attr.multi_gpu(2)
def test_copy_and_send_to_gpu_multi(self):
c2 = self.c2.copy()
with testing.assert_warns(DeprecationWarning):
self.c2.to_gpu(0)
with testing.assert_warns(DeprecationWarning):
c2.to_gpu(1)
self.assertEqual(self.c2[0][0].x.data.device.id, 0)
self.assertEqual(self.c2[0][1].x.data.device.id, 0)
self.assertEqual(c2[0][0].x.data.device.id, 1)
self.assertEqual(c2[0][1].x.data.device.id, 1)
def test_to_cpu_on_cpu(self):
x1 = self.l1.x.data
gx1 = self.l1.x.grad
x2 = self.l2.x.data
gx2 = self.l2.x.grad
x3 = self.l3.x.data
gx3 = self.l3.x.grad
with testing.assert_warns(DeprecationWarning):
self.c2.to_cpu()
self.assertIs(self.l1.x.data, x1)
self.assertIs(self.l1.x.grad, gx1)
self.assertIs(self.l2.x.data, x2)
self.assertIs(self.l2.x.grad, gx2)
self.assertIs(self.l3.x.data, x3)
self.assertIs(self.l3.x.grad, gx3)
@attr.gpu
def test_to_cpu(self):
with testing.assert_warns(DeprecationWarning):
self.c2.to_gpu()
with testing.assert_warns(DeprecationWarning):
self.c2.to_cpu()
self.assertIs(self.c2.xp, numpy)
self.assertIs(self.c1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.x.data, numpy.ndarray)
self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
self.assertIsInstance(self.l2.x.data, numpy.ndarray)
self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
self.assertIsInstance(self.l3.x.data, numpy.ndarray)
self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
@attr.gpu
def test_to_gpu(self):
cupy = cuda.cupy
with testing.assert_warns(DeprecationWarning):
self.c2.to_gpu()
self.assertIs(self.c2.xp, cupy)
self.assertIs(self.c1.xp, cupy)
self.assertIs(self.l1.xp, cupy)
self.assertIs(self.l2.xp, cupy)
self.assertIs(self.l3.xp, cupy)
self.assertIsInstance(self.l1.x.data, cupy.ndarray)
self.assertIsInstance(self.l1.x.grad, cupy.ndarray)
self.assertIsInstance(self.l2.x.data, cupy.ndarray)
self.assertIsInstance(self.l2.x.grad, cupy.ndarray)
self.assertIsInstance(self.l3.x.data, cupy.ndarray)
self.assertIsInstance(self.l3.x.grad, cupy.ndarray)
@attr.chainerx
def test_to_chx(self):
self.c2.to_device(backend.CpuDevice())
self.c2.to_chx()
self.assertIs(self.c2.xp, chainerx)
self.assertIs(self.c1.xp, chainerx)
self.assertIs(self.l1.xp, chainerx)
self.assertIs(self.l2.xp, chainerx)
self.assertIs(self.l3.xp, chainerx)
self.assertIsInstance(self.l1.x.data, chainerx.ndarray)
self.assertIsInstance(self.l1.x.grad, chainerx.ndarray)
self.assertIsInstance(self.l2.x.data, chainerx.ndarray)
self.assertIsInstance(self.l2.x.grad, chainerx.ndarray)
self.assertIsInstance(self.l3.x.data, chainerx.ndarray)
self.assertIsInstance(self.l3.x.grad, chainerx.ndarray)
expected_device = chainerx.get_device('native:0')
self.assertIs(self.l1.x.data.device, expected_device)
self.assertIs(self.l1.x.grad.device, expected_device)
self.assertIs(self.l2.x.data.device, expected_device)
self.assertIs(self.l2.x.grad.device, expected_device)
self.assertIs(self.l3.x.data.device, expected_device)
self.assertIs(self.l3.x.grad.device, expected_device)
def test_to_device(self):
device = backend.CpuDevice()
self.c2.to_device(device)
self.assertIs(self.c2.xp, numpy)
self.assertIs(self.c1.xp, numpy)
self.assertIs(self.l1.xp, numpy)
self.assertIs(self.l2.xp, numpy)
self.assertIs(self.l3.xp, numpy)
self.assertIsInstance(self.l1.x.data, numpy.ndarray)
self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
self.assertIsInstance(self.l2.x.data, numpy.ndarray)
self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
self.assertIsInstance(self.l3.x.data, numpy.ndarray)
self.assertIsInstance(self.l3.x.grad, numpy.ndarray)
def test_params(self):
params = list(self.c2.params())
self.assertEqual([id(p) for p in params],
[id(self.l1.x), id(self.l1.y),
id(self.l2.x), id(self.l3.x)])
def test_params_skip_uninit(self):
params = list(self.c2.params(include_uninit=False))
self.assertEqual([id(p) for p in params],
[id(self.l1.x), id(self.l2.x), id(self.l3.x)])
def test_namedparams(self):
namedparams = list(self.c2.namedparams())
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/0/0/x', id(self.l1.x)),
('/0/0/y', id(self.l1.y)),
('/0/1/x', id(self.l2.x)),
('/1/x', id(self.l3.x))])
def test_namedparams_skip_uninit(self):
namedparams = list(self.c2.namedparams(include_uninit=False))
self.assertEqual([(name, id(p)) for name, p in namedparams],
[('/0/0/x', id(self.l1.x)),
('/0/1/x', id(self.l2.x)),
('/1/x', id(self.l3.x))])
def test_links(self):
links = list(self.c2.links())
self.assertEqual([id(l) for l in links],
[id(l) for l in [self.c2,
self.c1, self.l1, self.l2,
self.l3]])
def test_links_skipself(self):
links = list(self.c2.links(skipself=True))
self.assertEqual([id(l) for l in links],
[id(l) for l in [self.c1, self.l1, self.l2,
self.l3]])
def test_namedlinks(self):
namedlinks = list(self.c2.namedlinks())
self.assertEqual([(name, id(l)) for name, l in namedlinks],
[('/', id(self.c2)),
('/0', id(self.c1)),
('/0/0', id(self.l1)),
('/0/1', id(self.l2)),
('/1', id(self.l3))])
def test_namedlinks_skipself(self):
namedlinks = list(self.c2.namedlinks(skipself=True))
self.assertEqual([(name, id(l)) for name, l in namedlinks],
[('/0', id(self.c1)),
('/0/0', id(self.l1)),
('/0/1', id(self.l2)),
('/1', id(self.l3))])
def test_children(self):
self.assertEqual(tuple(id(c) for c in self.c2.children()),
(id(self.c1), id(self.l3)))
self.assertEqual(tuple(id(c) for c in self.c1.children()),
(id(self.l1), id(self.l2)))
def test_copyparams(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l1.y = chainer.Parameter()
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.ChainList(l1, l2)
c2 = chainer.ChainList(c1, l3)
l1.x.data.fill(0)
l2.x.data.fill(1)
l3.x.data.fill(2)
self.c2.copyparams(c2)
numpy.testing.assert_array_equal(self.l1.x.data, l1.x.data)
numpy.testing.assert_array_equal(self.l2.x.data, l2.x.data)
numpy.testing.assert_array_equal(self.l3.x.data, l3.x.data)
def test_zerograds(self):
with testing.assert_warns(DeprecationWarning):
self.c2.zerograds()
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
self.l1.y.initialize((2, 3))
numpy.testing.assert_array_equal(self.l1.y.grad, numpy.zeros((2, 3)))
def test_cleargrads(self):
self.c2.cleargrads()
self.assertIsNone(self.l1.x.grad)
self.assertIsNone(self.l2.x.grad)
self.assertIsNone(self.l3.x.grad)
self.l1.y.initialize((2, 3))
self.assertIsNone(self.l1.y.grad)
def test_addgrads(self):
l1 = chainer.Link()
with l1.init_scope():
l1.x = chainer.Parameter(shape=(2, 3))
l1.y = chainer.Parameter(shape=(2, 3))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(shape=2)
l3 = chainer.Link()
with l3.init_scope():
l3.x = chainer.Parameter(shape=3)
c1 = chainer.ChainList(l1, l2)
c2 = chainer.ChainList(c1, l3)
l1.x.grad.fill(1)
l2.x.grad.fill(2)
l3.x.grad.fill(3)
l1.y.grad.fill(4)
self.l1.x.grad.fill(-1)
self.l1.y.cleargrad()
self.l2.x.grad.fill(-2)
self.l3.x.grad.fill(-3)
self.c2.addgrads(c2)
numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
numpy.testing.assert_array_equal(self.l1.y.grad, l1.y.grad)
numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
def test_serialize(self):
l1 = chainer.Link()
with l1.init_scope():
l1.y = chainer.Parameter(shape=(1, 1))
l2 = chainer.Link()
with l2.init_scope():
l2.x = chainer.Parameter(0, 2)
c1 = chainer.ChainList(l1, l2)
mocks = {'0': mock.MagicMock(), '1': mock.MagicMock()}
serializer = mock.MagicMock()
serializer.__getitem__.side_effect = lambda k: mocks[k]
serializer.return_value = None
mocks['0'].return_value = None
mocks['1'].return_value = None
c1.serialize(serializer)
self.assertEqual(serializer.call_count, 0)
self.assertEqual(serializer.__getitem__.call_count, 2)
serializer.__getitem__.assert_any_call('0')
serializer.__getitem__.assert_any_call('1')
mocks['0'].assert_called_with('y', l1.y.data)
mocks['1'].assert_called_with('x', l2.x.data)
def test_count_params(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert self.c1.count_params() == 8
assert len(w) == 1
assert w[0].category is UserWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert len(w) == 1
assert w[0].category is UserWarning
self.c2[0][0].y.initialize((2, 3))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.c2.count_params()
assert not w
class TestChainListRepeat(unittest.TestCase):
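    # Tests for ChainList.repeat() with the 'init', 'copy' and 'share' modes.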
def setUp(self):
class ChainListForTest(chainer.ChainList):
def __init__(self):
super(ChainListForTest, self).__init__(chainer.Link())
def forward(self):
pass
self.chainlist = ChainListForTest()
self.link = self.chainlist[0]
with self.link.init_scope():
self.link.x = chainer.Parameter(
chainer.initializers.Normal(), shape=(2, 3))
def test_no_repeat(self):
ret = self.chainlist.repeat(0)
self.assertEqual(len(ret), 0)
def test_repeat_with_share_mode(self):
ret = self.chainlist.repeat(2, mode='share')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chainlist)
self.assertIsNot(ret[1], self.chainlist)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0][0], self.chainlist[0])
self.assertIsNot(ret[1][0], self.chainlist[0])
self.assertIsNot(ret[0][0], ret[1][0])
self.assertIsNot(ret[0][0].x, self.chainlist[0].x)
self.assertIsNot(ret[1][0].x, self.chainlist[0].x)
self.assertIsNot(ret[0][0].x, ret[1][0].x)
self.assertIs(ret[0][0].x.data, self.chainlist[0].x.data)
self.assertIs(ret[0][0].x.data, ret[1][0].x.data)
self.assertEqual(ret[0][0].x.shape, self.chainlist[0].x.shape)
self.assertEqual(ret[0][0].x.shape, ret[1][0].x.shape)
self.assertEqual(ret[0][0].x.dtype, self.chainlist[0].x.dtype)
self.assertEqual(ret[0][0].x.dtype, ret[1][0].x.dtype)
def test_repeat_with_copy_mode(self):
ret = self.chainlist.repeat(2, mode='copy')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chainlist)
self.assertIsNot(ret[1], self.chainlist)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0][0], self.chainlist[0])
self.assertIsNot(ret[1][0], self.chainlist[0])
self.assertIsNot(ret[0][0], ret[1][0])
self.assertIsNot(ret[0][0].x, self.chainlist[0].x)
self.assertIsNot(ret[1][0].x, self.chainlist[0].x)
self.assertIsNot(ret[0][0].x, ret[1][0].x)
self.assertIsNot(ret[0][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[1][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[0][0].x.data, ret[1][0].x.data)
self.assertTrue(numpy.array_equal(
ret[0][0].x.data, self.chainlist[0].x.data))
self.assertTrue(numpy.array_equal(
ret[0][0].x.data, ret[1][0].x.data))
self.assertEqual(ret[0][0].x.shape, self.chainlist[0].x.shape)
self.assertEqual(ret[0][0].x.shape, ret[1][0].x.shape)
self.assertEqual(ret[0][0].x.dtype, self.chainlist[0].x.dtype)
self.assertEqual(ret[0][0].x.dtype, ret[1][0].x.dtype)
def test_repeat_with_init_mode(self):
ret = self.chainlist.repeat(2, mode='init')
self.assertEqual(len(ret), 2)
self.assertIsNot(ret[0], self.chainlist)
self.assertIsNot(ret[1], self.chainlist)
self.assertIsNot(ret[0], ret[1])
self.assertIsNot(ret[0][0], self.chainlist[0])
self.assertIsNot(ret[1][0], self.chainlist[0])
self.assertIsNot(ret[0][0], ret[1][0])
self.assertIsNot(ret[0][0].x, self.chainlist[0].x)
self.assertIsNot(ret[1][0].x, self.chainlist[0].x)
self.assertIsNot(ret[0][0].x, ret[1][0].x)
self.assertIsNot(ret[0][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[1][0].x.data, self.chainlist[0].x.data)
self.assertIsNot(ret[0][0].x.data, ret[1][0].x.data)
self.assertFalse(numpy.array_equal(
ret[0][0].x.data, self.chainlist[0].x.data))
self.assertFalse(numpy.array_equal(
ret[1][0].x.data, self.chainlist[0].x.data))
self.assertFalse(numpy.array_equal(
ret[0][0].x.data, ret[1][0].x.data))
self.assertEqual(ret[0][0].x.shape, self.chainlist[0].x.shape)
self.assertEqual(ret[0][0].x.shape, ret[1][0].x.shape)
self.assertEqual(ret[0][0].x.dtype, self.chainlist[0].x.dtype)
self.assertEqual(ret[0][0].x.dtype, ret[1][0].x.dtype)
@attr.ideep
class TestIntel64(unittest.TestCase):
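    # Tests for transferring a link between NumPy/CuPy and the iDeep
    # (intel64) backend, covering initialized and uninitialized parameters
    # as well as persistent values.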
def setUp(self):
self.link = chainer.Link()
shape = (2, 2)
dtype = numpy.float32
y_array = numpy.random.rand(*shape).astype(dtype)
pa_array = numpy.random.rand(*shape).astype(dtype)
ps_scalar = 2.4
with self.link.init_scope():
# Initialized parameter
self.link.y = chainer.Parameter(y_array)
# Uninitialized parameter
self.link.v = chainer.Parameter()
# Persistent ndarray
self.link.add_persistent('pa', pa_array)
# Persistent scalar
self.link.add_persistent('ps', ps_scalar)
self.y_array = y_array
self.pa_array = pa_array
self.ps_scalar = ps_scalar
def test_cpu_to_intel64(self):
link = self.link
with testing.assert_warns(DeprecationWarning):
link.to_intel64()
assert isinstance(link.device, backend.Intel64Device)
# Arrays should be converted to ideep.mdarray
# Initialized parameter
assert isinstance(link.y.data, intel64.ideep.mdarray)
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, intel64.ideep.mdarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
def test_intel64_to_intel64(self):
link = self.link
with testing.assert_warns(DeprecationWarning):
link.to_intel64()
prev_y = link.y
prev_v = link.v
prev_pa = link.pa
prev_ps = link.ps
with testing.assert_warns(DeprecationWarning):
link.to_intel64()
assert isinstance(link.device, backend.Intel64Device)
# Everything should be left untouched
# Initialized parameter
assert link.y is prev_y
# Uninitialized parameter
assert link.v is prev_v
# Persistent ndarray
assert link.pa is prev_pa
# Persistent scalar
assert link.ps is prev_ps
@attr.gpu
def test_gpu_to_intel64(self):
link = self.link
with testing.assert_warns(DeprecationWarning):
link.to_gpu()
assert link.device.device == cuda.Device(0)
with testing.assert_warns(DeprecationWarning):
link.to_intel64()
assert isinstance(link.device, backend.Intel64Device)
# Arrays should be converted to ideep.mdarray
# Initialized parameter
assert isinstance(link.y.data, intel64.ideep.mdarray)
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, intel64.ideep.mdarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
@attr.gpu
def test_intel64_to_gpu(self):
link = self.link
with testing.assert_warns(DeprecationWarning):
link.to_intel64()
assert isinstance(link.device, backend.Intel64Device)
with testing.assert_warns(DeprecationWarning):
link.to_gpu()
assert link.device.device == cuda.Device(0)
# Arrays should be converted to cupy.ndarray
# Initialized parameter
assert isinstance(link.y.data, cuda.cupy.ndarray)
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, cuda.ndarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
def test_intel64_to_cpu(self):
link = self.link
with testing.assert_warns(DeprecationWarning):
link.to_intel64()
assert isinstance(link.device, backend.Intel64Device)
with testing.assert_warns(DeprecationWarning):
link.to_cpu()
assert isinstance(link.device, backend.CpuDevice)
# Arrays should be converted to numpy.ndarray
# Initialized parameter
assert isinstance(link.y.data, numpy.ndarray)
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, numpy.ndarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
def test_cpu_to_intel64_unsupported(self):
# Test for persistents that cannot be transferred to iDeep.
with self.link.init_scope():
self.link.no_ideep = numpy.ones((2, 2, 2), numpy.float32)
self.link.register_persistent('no_ideep')
with testing.assert_warns(DeprecationWarning):
self.link.to_intel64()
assert isinstance(self.link.no_ideep, numpy.ndarray)
@attr.gpu
def test_gpu_to_intel64_unsupported(self):
# Test for persistents that cannot be transferred to iDeep.
with self.link.init_scope():
self.link.no_ideep = cuda.cupy.ones((2, 2, 2), numpy.float32)
self.link.register_persistent('no_ideep')
with testing.assert_warns(DeprecationWarning):
self.link.to_intel64()
assert isinstance(self.link.no_ideep, numpy.ndarray)
@attr.chainerx
class TestToChainerX(unittest.TestCase):
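    # Tests for Link.to_chx() transfers from NumPy/CuPy devices to ChainerX.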
def setUp(self):
self.link = chainer.Link()
shape = (2, 2)
dtype = numpy.float32
y_array = numpy.random.rand(*shape).astype(dtype)
pa_array = numpy.random.rand(*shape).astype(dtype)
ps_scalar = 2.4
with self.link.init_scope():
# Initialized parameter
self.link.y = chainer.Parameter(y_array)
# Uninitialized parameter
self.link.v = chainer.Parameter()
# Persistent ndarray
self.link.add_persistent('pa', pa_array)
# Persistent scalar
self.link.add_persistent('ps', ps_scalar)
self.y_array = y_array
self.pa_array = pa_array
self.ps_scalar = ps_scalar
def test_chainerx_to_chx(self):
link = self.link
link.to_chx()
prev_y = link.y
prev_v = link.v
prev_pa = link.pa
prev_ps = link.ps
link.to_chx()
assert link.device.device == chainerx.get_device('native:0')
# Everything should be left untouched
# Initialized parameter
assert link.y is prev_y
# Uninitialized parameter
assert link.v is prev_v
# Persistent ndarray
assert link.pa is prev_pa
# Persistent scalar
assert link.ps is prev_ps
def test_cpu_to_chx(self):
link = self.link
link.to_chx()
# Initialized parameter
assert isinstance(link.y.data, chainerx.ndarray)
assert link.y.data.device.backend.name == 'native'
assert link.y.data.device.index == 0
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, chainerx.ndarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
@attr.gpu
def test_gpu_to_chx(self):
link = self.link
with testing.assert_warns(DeprecationWarning):
link.to_gpu()
assert link.device.device == cuda.Device(0)
link.to_chx()
assert link.device.device == chainerx.get_device('cuda:0')
# Arrays should be converted to chainerx.ndarray
# Initialized parameter
assert isinstance(link.y.data, chainerx.ndarray)
assert link.y.data.device.backend.name == 'cuda'
assert link.y.data.device.index == 0
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, chainerx.ndarray)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
# TODO(niboshi): Add other test variations
class TestToDevice(unittest.TestCase):
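    # Tests for Link.to_device() with device specifier strings
    # such as '@numpy', '@cupy:0' and 'native:0'.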
def setUp(self):
self.link = chainer.Link()
shape = (2, 2)
dtype = numpy.float32
y_array = numpy.random.rand(*shape).astype(dtype)
pa_array = numpy.random.rand(*shape).astype(dtype)
ps_scalar = 2.4
with self.link.init_scope():
# Initialized parameter
self.link.y = chainer.Parameter(y_array)
# Uninitialized parameter
self.link.v = chainer.Parameter()
# Persistent ndarray
self.link.add_persistent('pa', pa_array)
# Persistent scalar
self.link.add_persistent('ps', ps_scalar)
self.y_array = y_array
self.pa_array = pa_array
self.ps_scalar = ps_scalar
if cuda.available:
self.current_device_id = cuda.cupy.cuda.get_device_id()
def check_to_device(self, device_spec, expected_ndarray_type):
link = self.link
link.to_device(device_spec)
# Initialized parameter
assert isinstance(link.y.data, expected_ndarray_type)
_assert_variable_array_equal(link.y, self.y_array)
# Uninitialized parameter
assert link.v.data is None
# Persistent ndarray
assert isinstance(link.pa, expected_ndarray_type)
_assert_arrays_equal(link.pa, self.pa_array)
# Persistent scalar
assert link.ps == self.ps_scalar
return link
def test_to_device_numpy(self):
link = self.check_to_device('@numpy', numpy.ndarray)
assert isinstance(link.device, backend.CpuDevice)
@attr.gpu
def test_to_device_cupy(self):
link = self.check_to_device('@cupy:0', cuda.ndarray)
assert link.device.device == cuda.Device(0)
@attr.chainerx
def test_to_device_chainerx(self):
link = self.check_to_device('native:0', chainerx.ndarray)
assert link.device.device == chainerx.get_device('native:0')
class TestCallMethod(unittest.TestCase):
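    # Tests how calling a link dispatches to forward() or to a
    # user-defined __call__().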
def setUp(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
self.model = Model()
def test_has_forward_no_call(self):
self.model.forward = mock.MagicMock()
self.model(0) # model.forward is called
self.model.forward.assert_called_once()
def test_has_call_and_forward(self):
self.model.__call__ = mock.MagicMock()
self.model.forward = mock.MagicMock()
self.model(0) # Link.__call__ is called
self.model.forward.assert_called_with(0)
self.model.__call__.assert_not_called()
def test_has_call_no_forward(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
self.mock = mock.MagicMock()
def __call__(self, x):
self.mock(x)
model = Model()
model(0) # model.__call__ is called
model.mock.assert_called_with(0)
def test_no_call_no_forward(self):
with self.assertRaises(AttributeError):
self.model(0)
class TestLinkOverrideToDeviceMethods(unittest.TestCase):
# Overriding to_cpu, to_gpu, to_intel64 is deprecated.
# This test ensures DeprecationWarning is emitted and the overridden
# method is actually called.
def create_link(self, method_name):
class ChildLink(chainer.Link):
def __init__(self):
self.to_method_called = 0
super(ChildLink, self).__init__()
if method_name == 'to_device':
def to_device(self, device):
assert False # never called
elif method_name == 'to_chx':
def to_chx(self, device):
assert False # never called
elif method_name == 'from_chx':
def from_chx(self, device):
assert False # never called
elif method_name == 'to_cpu':
def to_cpu(self):
with testing.assert_warns(DeprecationWarning):
super(ChildLink, self).to_cpu()
self.to_method_called += 1
elif method_name == 'to_gpu':
def to_gpu(self, device=None):
assert isinstance(device, (cuda.Device, int))
with testing.assert_warns(DeprecationWarning):
super(ChildLink, self).to_gpu(device)
self.to_method_called += 1
elif method_name == 'to_intel64':
def to_intel64(self):
with testing.assert_warns(DeprecationWarning):
super(ChildLink, self).to_intel64()
self.to_method_called += 1
else:
assert False, method_name
class ParentLink(chainer.Chain):
def __init__(self):
super(ParentLink, self).__init__()
with self.init_scope():
self.child = ChildLink()
return ParentLink
# to_device, to_chx, from_chx can never be overridden
def test_to_device_override(self):
with pytest.raises(TypeError):
self.create_link('to_device')
def test_to_chx_override(self):
with pytest.raises(TypeError):
self.create_link('to_chx')
def test_from_chx_override(self):
with pytest.raises(TypeError):
self.create_link('from_chx')
# Deprecation warning is emitted on class definition
def test_to_cpu_override(self):
with testing.assert_warns(DeprecationWarning):
self.create_link('to_cpu')
def test_to_gpu_override(self):
with testing.assert_warns(DeprecationWarning):
self.create_link('to_gpu')
def test_to_intel64_override(self):
with testing.assert_warns(DeprecationWarning):
self.create_link('to_intel64')
# Overridden methods are called on to_device()
def test_to_device_cpu(self):
with testing.assert_warns(DeprecationWarning):
cls = self.create_link('to_cpu')
l = cls()
l.to_device('@numpy')
assert l.child.to_method_called == 1
@attr.gpu
def test_to_device_gpu(self):
with testing.assert_warns(DeprecationWarning):
cls = self.create_link('to_gpu')
l = cls()
l.to_device('@cupy:0')
assert l.child.to_method_called == 1
@attr.multi_gpu(2)
def test_to_device_multi_gpu(self):
with testing.assert_warns(DeprecationWarning):
cls = self.create_link('to_gpu')
l = cls()
l.to_device('@cupy:1')
assert l.child.to_method_called == 1
@attr.ideep
def test_to_device_intel64(self):
with testing.assert_warns(DeprecationWarning):
cls = self.create_link('to_intel64')
l = cls()
l.to_device('@intel64')
assert l.child.to_method_called == 1
# Overridden methods are called on to_cpu()/to_gpu()/to_intel()
def test_to_cpu(self):
with testing.assert_warns(DeprecationWarning):
cls = self.create_link('to_cpu')
l = cls()
with testing.assert_warns(DeprecationWarning):
l.to_cpu()
assert l.child.to_method_called == 1
@attr.gpu
def test_to_gpu_without_arg(self):
with testing.assert_warns(DeprecationWarning):
cls = self.create_link('to_gpu')
l = cls()
with testing.assert_warns(DeprecationWarning):
l.to_gpu()
assert l.child.to_method_called == 1
@attr.gpu
def test_to_gpu_with_arg(self):
with testing.assert_warns(DeprecationWarning):
cls = self.create_link('to_gpu')
l = cls()
with testing.assert_warns(DeprecationWarning):
l.to_gpu(0)
assert l.child.to_method_called == 1
@attr.ideep
def test_to_intel64(self):
with testing.assert_warns(DeprecationWarning):
cls = self.create_link('to_intel64')
l = cls()
with testing.assert_warns(DeprecationWarning):
l.to_intel64()
assert l.child.to_method_called == 1
@_inject_backend_tests
class TestSerialize(unittest.TestCase):
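    # Tests Link.serialize() with a mock serializer that supplies arrays
    # for uninitialized parameters and a None persistent value.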
def setUp(self):
self.array = numpy.array([1, 2, 3], dtype=numpy.float32)
self.serializer = mock.MagicMock(return_value=self.array)
link = chainer.Link()
with link.init_scope():
link.x = chainer.Parameter()
link.y = chainer.Parameter()
link.add_persistent('z', None)
self.link = link
def test_serialize_numpy(self, backend_config):
array = self.array
link = self.link
serializer = self.serializer
link.to_device(backend_config.device)
link.serialize(serializer)
self.assertEqual(serializer.call_count, 3)
cpu_device = chainer.backend.CpuDevice()
numpy.testing.assert_array_equal(cpu_device.send(link.x.array), array)
numpy.testing.assert_array_equal(cpu_device.send(link.y.array), array)
numpy.testing.assert_array_equal(cpu_device.send(link.z), array)
testing.run_module(__name__, __file__)
| 103,809
| 36.194554
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_backprop_utils.py
|
import unittest
import mock
import numpy
import six
import chainer
from chainer import _backprop_utils
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
def make_array(start, shape, dtype):
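    # Returns an array of the given shape and dtype filled with
    # consecutive values starting at `start`.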
size = numpy.product(shape, dtype='i')
a = numpy.arange(start, start + size)
a = a.reshape(shape)
a = a.astype(dtype, copy=False)
return a
class FuncWithBackward(chainer.FunctionNode):
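    # FunctionNode whose backward() delegates to a mock set by the test.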
def backward(self, target_input_indexes, grad_outputs):
return self._mock_backward(target_input_indexes, grad_outputs)
class FuncWithBackwardAccumulate(chainer.FunctionNode):
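    # FunctionNode that overrides backward_accumulate() and delegates the
    # actual gradient computation to a mock set by the test.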
def backward_accumulate(self, target_input_indexes, grad_outputs,
grad_inputs):
"""Computes gradients w.r.t.\\ specified inputs and accumulates them.
This method provides a way to fuse the backward computation and the
gradient accumulations in the case that the multiple functions are
applied to the same variable.
Users have to override either of this method or :meth:`backward`.
It is often simpler to implement :meth:`backward` and is recommended
if you do not need to provide efficient gradient accumulation.
Args:
target_input_indexes (tuple of int): Indices of the input variables
w.r.t. which the gradients are required. It is guaranteed that
this tuple contains at least one element.
grad_outputs (tuple of Variable): Gradients w.r.t. the output
variables. If the gradient w.r.t. an output variable is not
given, the corresponding element is ``None``.
grad_inputs (tuple of Variable): Gradients w.r.t. the input
variables specified by ``target_input_indexes``. These values
are computed by other computation paths. If there is no
gradient value existing for the variable, the corresponding
element is ``None``. See also the note below.
Returns:
Tuple of variables that represent the gradients w.r.t. specified
input variables. Unlike :meth:`backward`, the length of the tuple
            **must** be the same as that of ``target_input_indexes``.
.. note::
When the same variable is passed to the multiple input arguments of
a function, only the first position of ``grad_inputs`` corresponding
to these input arguments may contain the gradient variable
corresponding to that input variable, and other entries are set to
``None``. This is an implementation-detail convention to avoid the
complication of correctly accumulating gradients in such a case.
This behavior might be changed in a future version.
"""
assert isinstance(target_input_indexes, tuple)
assert isinstance(grad_outputs, tuple)
assert isinstance(grad_inputs, tuple)
# The default implementation uses backward(). You can override this
# method without using backward().
gxs = self._mock_backward(target_input_indexes, grad_outputs)
len_gxs = len(gxs)
if len_gxs == len(self.inputs):
gxs = tuple([gxs[i] for i in target_input_indexes])
elif len_gxs != len(target_input_indexes):
raise ValueError(
'number of gradients returned by %s (%s) is incorrect.'
% (self._impl_name, self.label))
return tuple([gx if g_input is None else
g_input if gx is None else
gx + g_input
for gx, g_input in six.moves.zip(gxs, grad_inputs)])
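# A hedged, minimal sketch (not part of the original test suite): the
# accumulation rule used in backward_accumulate above, exercised with plain
# chainer.Variables.  The helper name ``_demo_backward_accumulate`` is
# hypothetical.
def _demo_backward_accumulate():
    gx_new = chainer.Variable(numpy.array([1., 2.], numpy.float32))
    gx_other = chainer.Variable(numpy.array([10., 20.], numpy.float32))
    # ``None`` on either side means no gradient arrived from that path.
    pairs = [(gx_new, gx_other), (gx_new, None), (None, gx_other)]
    combined = [gx if acc is None else acc if gx is None else gx + acc
                for gx, acc in pairs]
    numpy.testing.assert_array_equal(combined[0].array, [11., 22.])
    assert combined[1] is gx_new and combined[2] is gx_other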
@testing.parameterize(*testing.product({
'y_shape': [(4,), (0,), (2, 3), ()],
'x_shape': [(3,), (0,), (4, 1), ()],
'override': ['backward', 'backward_accumulate'],
}))
class TestFunctionNode(unittest.TestCase):
def _get_method(self, prefix, gpu):
suffix = 'gpu' if gpu else 'cpu'
return getattr(self.f, prefix + '_' + suffix)
def setUp(self):
y_shape = self.y_shape
x_shape = self.x_shape
y1 = make_array(1, y_shape, numpy.float32)
y2 = make_array(2, y_shape, numpy.float32)
gx1 = chainer.Variable(
make_array(1, x_shape, numpy.float32))
gx2 = None
gy1 = make_array(1, y_shape, numpy.float32)
gy2 = make_array(1, y_shape, numpy.float32)
f = {
'backward': FuncWithBackward,
'backward_accumulate': FuncWithBackwardAccumulate,
}[self.override]()
f._mock_backward = mock.MagicMock(return_value=(gx1, gx2))
f.check_type_forward = mock.MagicMock()
f.forward_cpu = mock.MagicMock(return_value=(y1, y2))
f.forward_gpu = mock.MagicMock()
self.f = f
self.x1 = make_array(0, x_shape, numpy.float32)
self.x2 = make_array(0, x_shape, numpy.int32)
self.y1 = y1
self.y2 = y2
self.gx1 = gx1
self.gx2 = gx2
self.gx1_orig = chainer.Variable(
make_array(3, x_shape, numpy.float32))
self.gx2_orig = chainer.Variable(
make_array(2, x_shape, numpy.float32))
self.gx1_accum = gx1 + self.gx1_orig
self.gy1 = gy1
self.gy2 = gy2
def tearDown(self):
# Set None to delete cuda array
self.f = None
self.y1 = None
self.y2 = None
self.gx1 = None
def setup_gpu(self):
self.x1 = cuda.to_gpu(self.x1)
self.x2 = cuda.to_gpu(self.x2)
self.y1 = cuda.to_gpu(self.y1)
self.y2 = cuda.to_gpu(self.y2)
self.gx1.to_gpu()
self.gx1_orig.to_gpu()
self.gx2_orig.to_gpu()
self.gx1_accum.to_gpu()
self.gy1 = cuda.to_gpu(self.gy1)
self.gy2 = cuda.to_gpu(self.gy2)
self.f.forward_gpu = mock.MagicMock(return_value=(self.y1, self.y2))
self.f._mock_backward = mock.MagicMock(
return_value=(self.gx1, self.gx2))
def check_backprop_step(self, gxs):
flag_none = gxs[0] is None
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
self.f.inputs = (x1.node, x2.node)
gxrefs = [[gx] if gx is not None else [] for gx in gxs]
grad_outputs = (self.gy1, self.gy2)
grad_inputs = dict(zip(self.f.inputs, gxrefs))
_backprop_utils.backprop_step(
self.f, (0, 1), grad_outputs, grad_inputs, True)
if not chainer.configuration.config.lazy_grad_sum:
# assert eager grad sum
for gxref in gxrefs:
self.assertLessEqual(len(gxref), 1)
gx1 = _backprop_utils._reduce(gxrefs[0])
gx2 = _backprop_utils._reduce(gxrefs[1])
if flag_none:
numpy.testing.assert_array_equal(cuda.to_cpu(gx1.data),
cuda.to_cpu(self.gx1.data))
self.assertIsNone(gx2)
else:
numpy.testing.assert_array_equal(cuda.to_cpu(gx1.data),
cuda.to_cpu(self.gx1_accum.data))
numpy.testing.assert_array_equal(cuda.to_cpu(gx2.data),
cuda.to_cpu(self.gx2_orig.data))
def test_backprop_step_none_cpu(self):
self.check_backprop_step((None, None))
@attr.gpu
def test_backprop_step_none_gpu(self):
self.setup_gpu()
self.check_backprop_step((None, None))
def test_backprop_step_cpu(self):
self.check_backprop_step((self.gx1_orig, self.gx2_orig))
@attr.gpu
def test_backprop_step_gpu(self):
self.setup_gpu()
self.check_backprop_step((self.gx1_orig, self.gx2_orig))
testing.run_module(__name__, __file__)
| 7,801
| 36.873786
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_gradient_check.py
|
import math
import unittest
import warnings
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
from chainer.testing import condition
import chainerx
def _uniform(*shape):
return numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
def _full_like(x, val):
xp = chainer.backend.get_array_module(x)
return xp.full_like(x, val)
def _zeros_like(x):
xp = chainer.backend.get_array_module(x)
return xp.zeros_like(x)
def _dot(x, y):
return sum(map(lambda a: a[0] * a[1], zip(x, y)))
class NumericalGradientTest(unittest.TestCase):
in_shapes = ((2, 1),)
gout_shapes = ((2, 1),)
eps = None
atol = 1e-3
rtol = 1e-3
def f(self, xs):
return xs[0] ** 2,
def df(self, xs):
return (2 * xs[0],),
def setUp(self):
self.xs = tuple([_uniform(*s) for s in self.in_shapes])
self.gys = tuple([
None if s is None else _uniform(*s) for s in self.gout_shapes])
def check_numerical_grad_one(self, f, df, xs, gys, eps):
dfxs = df(xs)
gys = tuple(0 if gy is None else gy for gy in gys)
        # matrix-vector multiplication of dfxs and gys
dx_expect = tuple(map(lambda dfx: _dot(dfx, gys), dfxs))
def func():
return f(xs)
dx_actual = gradient_check.numerical_grad(func, xs, gys, eps)
self.assertEqual(len(dx_expect), len(dx_actual))
for e, a in zip(dx_expect, dx_actual):
testing.assert_allclose(e, a, atol=self.atol, rtol=self.rtol)
def check_numerical_grad(self, f, df, xs, gys, eps=None):
if eps is None:
eps = tuple(10 ** (-i) for i in six.moves.range(2, 5))
elif not isinstance(eps, tuple):
eps = (eps, )
for e in eps:
self.check_numerical_grad_one(f, df, xs, gys, e)
def test_numerical_grad_cpu(self):
self.check_numerical_grad(self.f, self.df, self.xs, self.gys,
eps=self.eps)
@attr.gpu
def test_numerical_grad_gpu(self):
gys = tuple(None if gy is None else cuda.to_gpu(gy)
for gy in self.gys)
self.check_numerical_grad(self.f, self.df,
tuple(map(cuda.to_gpu, self.xs)), gys,
eps=self.eps)
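# A hedged, minimal sketch (not part of the original test suite): a direct
# use of numerical_grad on f(x) = x ** 2, whose analytical gradient is 2 * x.
# The helper name ``_demo_numerical_grad`` is hypothetical.
def _demo_numerical_grad():
    x = numpy.array([3.0], dtype=numpy.float32)
    gy = numpy.array([1.0], dtype=numpy.float32)
    # The callable recomputes the output from the (perturbed) input array.
    gx, = gradient_check.numerical_grad(lambda: (x ** 2,), (x,), (gy,))
    testing.assert_allclose(gx, 2 * x, atol=1e-2, rtol=1e-2)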
class NumericalGradientTest2(NumericalGradientTest):
in_shapes = ((),)
gout_shapes = ((),)
def f(self, xs):
return 1,
def df(self, xs):
return (0,),
class NumericalGradientTest3(NumericalGradientTest):
in_shapes = ((2, 1),)
gout_shapes = ((2, 1),)
# Too small eps causes cancellation of significant digits
eps = (1e-2, 1e-3)
def f(self, xs):
xp = chainer.backend.get_array_module(*xs)
return xp.exp(xs[0]),
def df(self, xs):
xp = chainer.backend.get_array_module(*xs)
return (xp.exp(xs[0]),),
def setUp(self):
self.xs = (_uniform(2, 1),)
self.gys = (_uniform(2, 1),)
class NumericalGradientTest4(NumericalGradientTest):
in_shapes = ((2, 1), (2, 1))
gout_shapes = ((2, 1), (2, 1), (2, 1))
atol = 1e-2
rtol = 1e-2
def f(self, xs):
assert len(xs) == 2
return (2 * xs[0] + 3 * xs[1],
4 * xs[0] + 5 * xs[1],
6 * xs[0] + 7 * xs[1])
def df(self, xs):
assert len(xs) == 2
return (
(_full_like(xs[0], 2), _full_like(xs[0], 4), _full_like(xs[0], 6)),
(_full_like(xs[1], 3), _full_like(xs[1], 5), _full_like(xs[1], 7)))
class NumericalGradientTest5(NumericalGradientTest):
in_shapes = ((2, 1), (2, 1))
gout_shapes = ((2, 1), None, (2, 1))
atol = 5e-3
rtol = 5e-3
def f(self, xs):
assert len(xs) == 2
return (2 * xs[0] + 3 * xs[1],
4 * xs[0] + 5 * xs[1],
6 * xs[0] + 7 * xs[1])
def df(self, xs):
assert len(xs) == 2
return (
(_full_like(xs[0], 2), _zeros_like(xs[0]), _full_like(xs[0], 6)),
(_full_like(xs[1], 3), _zeros_like(xs[1]), _full_like(xs[1], 7)))
class NumericalGradientTest6(NumericalGradientTest):
in_shapes = ((2, 1),)
gout_shapes = (None,)
class NumericalGradientReferenceTest(unittest.TestCase):
def setUp(self):
self.x = _uniform(2, 3)
def check_reference(self, x):
        # A returned value and an input refer to the same memory.
# See issue #488
def func():
return x,
gx, = gradient_check.numerical_grad(func, (x,), (1,))
testing.assert_allclose(cuda.to_cpu(gx), 1)
def test_reference_cpu(self):
self.check_reference(self.x)
@attr.gpu
def test_reference_gpu(self):
self.check_reference(cuda.to_gpu(self.x))
class NumericalGradientInvalidEps(NumericalGradientTest):
def check_invalid_eps(self, xs, gys, eps):
with self.assertRaises(AssertionError):
self.check_numerical_grad(self.f, self.df, xs, gys, eps)
@condition.retry(3)
def test_numerical_grad_cpu(self):
self.check_invalid_eps(self.xs, self.gys, 0)
self.check_invalid_eps(self.xs, self.gys, -1.0)
@condition.retry(3)
@attr.gpu
def test_numerical_grad_gpu(self):
xs = tuple(map(cuda.to_gpu, self.xs))
gys = tuple(None if gy is None else cuda.to_gpu(gy)
for gy in self.gys)
self.check_invalid_eps(xs, gys, 0)
self.check_invalid_eps(xs, gys, -1.0)
class NumericalGradientInvalidType(unittest.TestCase):
def setUp(self):
self.x = numpy.array(0)
self.y = numpy.array(0)
self.f = lambda: None
@attr.gpu
def test_invalid_inputs(self):
y = cuda.to_gpu(self.y)
with self.assertRaises(RuntimeError):
gradient_check.numerical_grad(self.f, (self.x, y), ())
@attr.gpu
def test_invalid_outputs(self):
y = cuda.to_gpu(self.y)
with self.assertRaises(RuntimeError):
gradient_check.numerical_grad(self.f, (), (self.x, y))
@attr.gpu
def test_invalid_mixed(self):
y = cuda.to_gpu(self.y)
with self.assertRaises(RuntimeError):
gradient_check.numerical_grad(self.f, (self.x,), (y,))
class NumericalGradientEpsTest(unittest.TestCase):
def setUp(self):
self.x = numpy.array(0.0, dtype=numpy.float32)
self.y = numpy.array(1.0, dtype=numpy.float32)
def check_different_eps(self, x, y):
def f():
if -1 < x < 1:
return x.copy(),
elif -2 < x < 2:
return 2 * x,
else:
return 0,
gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=0.5)
self.assertEqual(gx, 1.)
gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=1.5)
self.assertEqual(gx, 2.)
gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=2.5)
self.assertEqual(gx, 0.)
    def test_different_eps_cpu(self):
self.check_different_eps(self.x, self.y)
@attr.gpu
    def test_different_eps_gpu(self):
self.check_different_eps(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
default_eps = 1e-3
# `result`: True if `func` is non-differentiable on `x`
@testing.parameterize(*[
{'func': 'zero', 'x': [-100.], 'result': False},
{'func': 'zero', 'x': [100.], 'result': False},
{'func': 'zero', 'x': [0.], 'result': False},
{'func': 'zero', 'x': [default_eps / 10], 'result': False},
{'func': 'zero', 'x': numpy.random.normal(size=(3, 2)), 'result': False},
{'func': 'zero', 'x': numpy.random.normal(size=()), 'result': False},
{'func': 'linear', 'x': [-100.], 'result': False},
{'func': 'linear', 'x': [100.], 'result': False},
{'func': 'linear', 'x': [0.], 'result': False},
{'func': 'linear', 'x': numpy.random.normal(size=(3, 2)), 'result': False},
{'func': 'linear', 'x': numpy.random.normal(size=()), 'result': False},
# (Invalid input domain)
{'func': 'linear', 'x': [numpy.inf], 'result': False,
'ignore_warning': RuntimeWarning},
{'func': 'quadratic', 'x': [-100.], 'result': False},
{'func': 'quadratic', 'x': [100.], 'result': False},
{'func': 'quadratic', 'x': [0.], 'result': False},
{'func': 'cubic', 'x': [-100.], 'result': False},
{'func': 'cubic', 'x': [100.], 'result': False},
{'func': 'cubic', 'x': [0.], 'result': False},
# Too large epsilon
{'func': 'cubic', 'x': [0.], 'eps': 1e-1, 'result': True},
{'func': 'abs', 'x': [0.], 'result': True},
{'func': 'abs', 'x': [[3, 1], [0, 2]], 'result': True},
{'func': 'abs', 'x': [default_eps * 0.8], 'result': True},
{'func': 'abs', 'x': [-default_eps * 0.8], 'result': True},
{'func': 'abs', 'x': [default_eps * 1.2], 'result': False},
{'func': 'abs', 'x': [-default_eps * 1.2], 'result': False},
{'func': 'abs', 'x': [100.], 'result': False},
{'func': 'abs', 'x': [-100.], 'result': False},
{'func': 'step', 'x': [0.], 'result': True},
{'func': 'step', 'x': [default_eps * 0.8], 'result': True},
{'func': 'step', 'x': [-default_eps * 0.8], 'result': True},
{'func': 'step', 'x': [default_eps * 1.2], 'result': False},
{'func': 'step', 'x': [-default_eps * 1.2], 'result': False},
{'func': 'step', 'x': [100.], 'result': False},
{'func': 'step', 'x': [-100.], 'result': False},
{'func': 'clip', 'x': [0.], 'result': True},
{'func': 'clip', 'x': [1.], 'result': True},
{'func': 'clip', 'x': [0.5], 'result': False},
{'func': 'floor', 'x': [0.], 'result': True},
{'func': 'floor', 'x': [100 + default_eps * 0.8], 'result': True},
{'func': 'floor', 'x': [100 - default_eps * 0.8], 'result': True},
{'func': 'floor', 'x': [100 + default_eps * 1.2], 'result': False},
{'func': 'floor', 'x': [100 - default_eps * 1.2], 'result': False},
{'func': 'exp', 'x': [-100], 'result': False},
{'func': 'exp', 'x': [0.], 'result': False},
{'func': 'exp', 'x': [13.], 'result': False},
{'func': 'log', 'x': [100.], 'result': False},
# (Smaller epsilon is required because slope is steep)
{'func': 'log', 'x': [1e-3], 'eps': 1e-6, 'result': False},
{'func': 'log', 'x': [0.], 'result': True,
'ignore_warning': RuntimeWarning},
# (Invalid input domain)
{'func': 'log', 'x': [-10.], 'result': False,
'ignore_warning': RuntimeWarning},
{'func': 'tan', 'x': [default_eps * 1.2], 'result': False},
{'func': 'tan', 'x': [default_eps * 0.8], 'result': False},
{'func': 'tan', 'x': [math.pi / 2], 'result': True},
{'func': 'tan', 'x': [-math.pi / 2], 'result': True},
{'func': 'tan', 'x': [3 * math.pi / 2], 'result': True},
{'func': 'tan', 'x': [3 * math.pi / 2 + default_eps * 0.8],
'result': True},
{'func': 'tan', 'x': [3 * math.pi / 2 - default_eps * 0.8],
'result': True},
# (Smaller epsilon is required because slope is steep)
{'func': 'tan', 'x': [3 * math.pi / 2 + 1e-3], 'eps': 1e-6,
'result': False},
# (Smaller epsilon is required because slope is steep)
{'func': 'tan', 'x': [3 * math.pi / 2 - 1e-3], 'eps': 1e-6,
'result': False},
{'func': 'nan_segment', 'x': [0.], 'result': False},
{'func': 'nan_segment', 'x': [-1.], 'result': True},
{'func': 'nan_segment', 'x': [1.], 'result': True},
])
class NumericalGradientDetectNondifferentiableTest(unittest.TestCase):
def setUp(self):
self.eps = getattr(self, 'eps', default_eps)
self.ignore_warning = getattr(self, 'ignore_warning', None)
def _func_zero(self, x):
xp = chainer.backend.get_array_module(x)
return xp.zeros_like(x),
def _func_linear(self, x):
return 2 * x,
def _func_quadratic(self, x):
return x * x + 2.,
def _func_cubic(self, x):
return -3 * x ** 3 + 2 * x ** 2 + 1,
def _func_abs(self, x):
return abs(x),
def _func_step(self, x):
xp = chainer.backend.get_array_module(x)
y = xp.zeros_like(x)
y[x > 0] = 1
return y,
def _func_clip(self, x):
y = x.clip(0, 1)
return y,
def _func_floor(self, x):
xp = chainer.backend.get_array_module(x)
return xp.floor(x),
def _func_exp(self, x):
xp = chainer.backend.get_array_module(x)
return xp.exp(x),
def _func_log(self, x):
xp = chainer.backend.get_array_module(x)
return xp.log(x),
def _func_tan(self, x):
xp = chainer.backend.get_array_module(x)
return xp.tan(x),
def _func_nan_segment(self, x):
xp = chainer.backend.get_array_module(x)
y = xp.ones_like(x)
y[-1 < x < 1] = numpy.nan
return y,
def check_positive(self, xp, func_name, input, eps, nout):
# Should be non-differentiable
func = getattr(self, '_func_{}'.format(func_name))
grad_outputs = [
xp.random.uniform(-1, 1, input.shape).astype(input.dtype)
for _ in range(nout)]
def f():
return func(input) * nout
try:
gradient_check.numerical_grad(
f, (input,), grad_outputs, eps=eps,
detect_nondifferentiable=True)
except gradient_check.NondifferentiableError:
pass
else:
raise AssertionError(
'Function `{}` is expected to be non-differentiable, '
'but determined to be differentiable.\n\n'
'eps: {}\n'
'input: {}\n'
'xp: {}\n'
''.format(
func_name, eps, input, xp.__name__))
def check_negative(self, xp, func_name, input, eps, nout):
# Should be differentiable
func = getattr(self, '_func_{}'.format(func_name))
grad_outputs = [
xp.random.uniform(-1, 1, input.shape).astype(input.dtype)
for _ in range(nout)]
def f():
return func(input) * nout
try:
gradient_check.numerical_grad(
f, (input,), grad_outputs, eps=eps,
detect_nondifferentiable=True)
except gradient_check.NondifferentiableError as e:
raise AssertionError(
'Function `{}` is expected to be differentiable, '
'but determined to be non-differentiable.\n\n'
'eps: {}\n'
'input: {}\n'
'xp: {}\n\n'
'{}: {}'
.format(
func_name, eps, input, xp.__name__,
e.__class__.__name__, e))
def check(self, xp, nout):
input = xp.asarray(self.x).astype(numpy.float32)
with warnings.catch_warnings():
if self.ignore_warning:
warnings.simplefilter('ignore', self.ignore_warning)
if self.result:
self.check_positive(xp, self.func, input, self.eps, nout)
else:
self.check_negative(xp, self.func, input, self.eps, nout)
def test_cpu(self):
self.check(numpy, 1)
@attr.gpu
def test_gpu(self):
self.check(cuda.cupy, 1)
def test_2_outputs_cpu(self):
self.check(numpy, 2)
@attr.gpu
def test_2_outputs_gpu(self):
self.check(cuda.cupy, 2)
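# A hedged, minimal sketch (not part of the original test suite): with
# detect_nondifferentiable=True, numerical_grad raises NondifferentiableError
# around a kink such as abs() at zero.  ``_demo_detect_kink`` is hypothetical.
def _demo_detect_kink():
    x = numpy.array([0.], dtype=numpy.float32)
    gy = numpy.array([1.], dtype=numpy.float32)
    try:
        gradient_check.numerical_grad(
            lambda: (abs(x),), (x,), (gy,), detect_nondifferentiable=True)
    except gradient_check.NondifferentiableError:
        return True  # the kink at x == 0 was detected
    return False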
class AssertAllCloseTest(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.y = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def check_identical(self, x):
testing.assert_allclose(x, x, atol=0, rtol=0)
@condition.repeat(5)
def test_identical_cpu(self):
self.check_identical(self.x)
@condition.repeat(5)
@attr.gpu
def test_identical_gpu(self):
self.check_identical(cuda.to_gpu(self.x))
def check_atol(self, x, y):
x_cpu = cuda.to_cpu(x)
y_cpu = cuda.to_cpu(y)
max_abs_diff = numpy.max(numpy.abs(x_cpu - y_cpu))
with self.assertRaises(AssertionError):
testing.assert_allclose(x, y, atol=max_abs_diff - 1, rtol=0)
testing.assert_allclose(x, y, atol=max_abs_diff + 1, rtol=0)
@condition.repeat(5)
def test_atol_cpu(self):
self.check_atol(self.x, self.y)
@condition.repeat(5)
@attr.gpu
def test_atol_gpu(self):
self.check_atol(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
class AssertAllCloseTest2(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.y = numpy.random.uniform(1, 2, (2, 3)).astype(numpy.float32)
def check_rtol(self, x, y):
x_cpu = cuda.to_cpu(x)
y_cpu = cuda.to_cpu(y)
max_ratio = numpy.max(numpy.abs(x_cpu - y_cpu) / y_cpu)
with self.assertRaises(AssertionError):
testing.assert_allclose(x, y, atol=0, rtol=max_ratio - 1)
testing.assert_allclose(x, y, atol=0, rtol=max_ratio + 1)
@condition.repeat(5)
def test_rtol_cpu(self):
self.check_rtol(self.x, self.y)
@condition.repeat(5)
@attr.gpu
def test_rtol_gpu(self):
self.check_rtol(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
class Ident(chainer.Function):
def forward(self, inputs):
return inputs
def backward(self, inputs, grads):
return grads
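# A hedged, minimal sketch (not part of the original test suite): a
# successful check_backward call against the identity function above.
# ``_demo_check_backward_ident`` is a hypothetical helper name.
def _demo_check_backward_ident():
    x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
    gy = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
    gradient_check.check_backward(
        lambda v: Ident()(v), x, gy, atol=1e-4, rtol=1e-3)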
# numpy.float16 is not tested because of the low precision.
@testing.parameterize(*testing.product({
'dtype': [None, numpy.float32, numpy.float64],
}))
@backend.inject_backend_tests(None, [
{},
{'use_cuda': True},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
class TestCheckBackward(unittest.TestCase):
def test_multiple_output(self, backend_config):
x1 = backend_config.get_array(numpy.array([1], dtype='f'))
x2 = backend_config.get_array(numpy.array([1], dtype='f'))
g1 = backend_config.get_array(numpy.array([1], dtype='f'))
g2 = backend_config.get_array(numpy.array([1], dtype='f'))
def f(x, y):
s, t = Ident()(x, y)
u = Ident()(t)
return s, u
gradient_check.check_backward(
f, (x1, x2), (g1, g2), dtype=self.dtype, atol=1e-4, rtol=1e-3)
def test_no_grads_for_not_float(self, backend_config):
if backend_config.use_chainerx:
raise unittest.SkipTest(
'gradient_check does not support no_grad option for ChainerX')
x1 = backend_config.get_array(numpy.array([1], dtype='f'))
# grad check for this is skipped
x2 = backend_config.get_array(numpy.array([0, 1], dtype='i'))
g1 = backend_config.get_array(numpy.array([1], dtype='f'))
def f(x, y):
# Integer data is not casted even when dtype is given
self.assertEqual(y.dtype, 'i')
s = Ident()(x)
return s,
gradient_check.check_backward(f, (x1, x2), g1, dtype=self.dtype)
def test_no_grads_option(self, backend_config):
if backend_config.use_chainerx:
raise unittest.SkipTest(
'gradient_check does not support no_grad option for ChainerX')
x1 = backend_config.get_array(numpy.array([2], dtype='f'))
# grad check for this is skipped
x2 = backend_config.get_array(numpy.array([3], dtype='f'))
g1 = backend_config.get_array(numpy.array([5], dtype='f'))
def f(x, y):
y_array = y.array
if (backend_config.xp is chainerx
and isinstance(y_array, chainerx.ndarray)):
y_array = y_array.as_grad_stopped()
s = x + y_array
return s,
self.assertRaises(
RuntimeError, # backward computes x1.grad
gradient_check.check_backward,
f, (x1, x2), g1, no_grads=[True, True])
def test_const_input(self, backend_config):
x1 = backend_config.get_array(numpy.array([2], dtype='f'))
# grad check for this is skipped
x2 = backend_config.get_array(numpy.array([3], dtype='f'))
g1 = backend_config.get_array(numpy.array([5], dtype='f'))
def f(x, y):
y_array = y.array
if (backend_config.xp is chainerx
and isinstance(y_array, chainerx.ndarray)):
y_array = y_array.as_grad_stopped()
s = x + y_array
return s,
self.assertRaises(
AssertionError, # numerical backward to x2 is nonzero
gradient_check.check_backward,
f, (x1, x2), g1, no_grads=[False, False])
def test_no_grads_option_with_dtype(self, backend_config):
if backend_config.use_chainerx:
raise unittest.SkipTest(
'gradient_check does not support no_grad option for ChainerX')
x1 = backend_config.get_array(numpy.array([1], dtype='f'))
x2 = backend_config.get_array(numpy.array([1], dtype='f'))
g1 = backend_config.get_array(numpy.array([1], dtype='f'))
eps = 1e-3
def f(x, y):
if self.dtype is not None:
# Check for correct dtypes if f is called to compute the
# numerical gradient
if x.data != x1:
self.assertEqual(x.dtype, self.dtype)
self.assertEqual(x.dtype, y.dtype)
s = Ident()(x)
return s,
gradient_check.check_backward(f, (x1, x2), g1, eps=eps,
no_grads=[False, True], dtype=self.dtype)
class IdentNoneIsZero(chainer.Function):
"""Identity function but following None-grad convention for RNNs"""
def forward(self, inputs):
return inputs
def backward(self, inputs, grads):
return tuple(
numpy.zeros_like(x) if g is None else g
for x, g in zip(inputs, grads)
)
@testing.parameterize(*testing.product({
'dtype': [None, numpy.float32, numpy.float64],
'size': [3, 1]
}))
class TestCheckBackwardNoneConvention(unittest.TestCase):
dtype = numpy.float64
def test_multiple_output(self):
size = self.size
x1 = numpy.arange(size).astype('float32')
x2 = numpy.arange(size).astype('float32')
g1 = numpy.ones(size, dtype='float32')
g2 = numpy.ones(size, dtype='float32')
def f(x, y):
s, t = IdentNoneIsZero()(x, y)
return s, t
gradient_check.check_backward(
f, (x1, x2), (g1, g2), dtype=self.dtype, atol=1e-2, rtol=1e-2)
gradient_check.check_backward(
f, (x1, x2), (g1, None), dtype=self.dtype, atol=1e-2, rtol=1e-2)
gradient_check.check_backward(
f, (x1, x2), (None, g2), dtype=self.dtype, atol=1e-2, rtol=1e-2)
class TestCheckBackwardFailure(unittest.TestCase):
def _broken_func_1(self):
class Broken(chainer.Function):
def forward(self, inputs):
x, = inputs
return (x * x),
def backward(self, inputs, grad_outputs):
x, = inputs
gy, = grad_outputs
return 3 * x * gy,
return Broken()
def _broken_func_2(self):
class Broken(chainer.FunctionNode):
def forward(self, inputs):
x, = inputs
self.retain_inputs((0,))
return (x * x),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
gy, = grad_outputs
return 3 * x * gy,
return Broken()
def _broken_func_3(self):
class Broken(chainer.FunctionNode):
def forward(self, inputs):
x, = inputs
self.retain_inputs((0,))
return (x * x),
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
gy, = grad_outputs
gx1 = 2 * x * gy
gx2 = 3 * x * gy
return (gx1, gx2)
return Broken()
def test_fail_function(self):
# Invalid backward (chainer.Function)
x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def f(x):
return self._broken_func_1()(x)
with self.assertRaises(AssertionError):
gradient_check.check_backward(f, x, gy)
def test_fail_function_node(self):
# Invalid backward (chainer.FunctionNode)
x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def f(x):
return self._broken_func_2().apply((x,))
with self.assertRaises(AssertionError):
gradient_check.check_backward(f, x, gy)
def test_fail_invalid_number_of_gradients(self):
# Invalid number of gradients
x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def f(x):
return self._broken_func_3().apply((x,))
with self.assertRaises(ValueError):
gradient_check.check_backward(f, x, gy)
def test_fail_invalid_number_of_gradients_0_size(self):
# Invalid number of gradients (0-sized input)
x = numpy.random.uniform(-1, 1, (2, 0)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (2, 0)).astype(numpy.float32)
def f(x):
return self._broken_func_3().apply((x,))
with self.assertRaises(ValueError):
gradient_check.check_backward(f, x, gy)
class NewIdent(chainer.FunctionNode):
def forward(self, inputs):
return inputs
def backward(self, indexes, grad_outputs):
return NewIdent().apply(grad_outputs)
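# A hedged, minimal sketch (not part of the original test suite): second-order
# gradients of f(x) = x * x verified with check_double_backward.
# ``_demo_check_double_backward`` is a hypothetical helper name.
def _demo_check_double_backward():
    x = numpy.random.uniform(0.5, 1.5, (2, 3)).astype(numpy.float32)
    gy = numpy.ones((2, 3), numpy.float32)
    ggx = numpy.ones((2, 3), numpy.float32)
    gradient_check.check_double_backward(
        lambda v: v * v, x, gy, ggx, dtype='d', atol=1e-3, rtol=1e-3)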
@backend.inject_backend_tests(None, [
{},
{'use_cuda': True},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
class TestCheckDoubleBackward(unittest.TestCase):
def test_multiple_input_output(self, backend_config):
x1, x2, gy1, gy2, ggx1, ggx2 = [
backend_config.get_array(numpy.ones((2, 3), 'f'))
for _ in range(6)]
def f(x, y):
w1 = x + y
w2 = w1 + y
return w1 * w1, w2 * w2
gradient_check.check_double_backward(
f, (x1, x2), (gy1, gy2),
(ggx1, ggx2), dtype='d', atol=1e-3, rtol=1e-3)
def test_double_backward_with_params(self, backend_config):
if backend_config.use_chainerx:
raise unittest.SkipTest(
'ChainerX does not support params argument of '
'gradient_check.check_double_backward().')
x, gy, ggx, param_a, ggparam = [
backend_config.get_array(numpy.ones((2, 3), 'f'))
for _ in range(5)]
param = chainer.Variable(param_a)
def f(x):
return x * param
gradient_check.check_double_backward(
f, x, gy, ggx, param, ggparam, atol=1e-3, rtol=1e-3)
@testing.parameterize(*testing.product({
'size': [0, 1, 5, 64]
}))
@backend.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestSampleUnitVector(unittest.TestCase):
def test_sample_unit_vector(self, backend_config):
size = self.size
device = backend_config.device
# _sample_unit_vector uses the current device
with chainer.using_device(device):
y = gradient_check._CheckBackward._sample_unit_vector(
size, device.xp)
assert device.is_array_supported(y)
assert y.shape == (size,)
y_cpu = chainer.get_device('@numpy').send(y)
if size >= 1:
numpy.testing.assert_allclose(numpy.square(y_cpu).sum(), 1.0)
assert numpy.min(abs(y_cpu)) >= 0.1 / numpy.sqrt(size)
if size >= 64:
assert numpy.min(y_cpu) < 0 < numpy.max(y_cpu)
testing.run_module(__name__, __file__)
| 28,541
| 31.507973
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_backend.py
|
import unittest
import numpy
import pytest
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import testing
from chainer.testing import attr
import chainerx
if chainerx.is_available():
import chainerx.testing
class _TestCopyToBase(object):
src_data = numpy.arange(1, 5, dtype=numpy.float32)
dst_data = numpy.zeros_like(src_data)
def _get_dst(self):
raise NotImplementedError
@staticmethod
def _to_cpu(arr):
return backend.CpuDevice().send(arr)
def test_from_cpu(self):
src = self.src_data
dst = self._get_dst()
backend.copyto(dst, src)
numpy.testing.assert_array_equal(self._to_cpu(dst), self.src_data)
@attr.gpu
def test_from_gpu(self):
src = cuda.cupy.array(self.src_data)
dst = self._get_dst()
backend.copyto(dst, src)
numpy.testing.assert_array_equal(self._to_cpu(dst), self.src_data)
@attr.ideep
def test_from_ideep(self):
src = intel64.ideep.array(self.src_data)
dst = self._get_dst()
assert isinstance(src, intel64.mdarray)
backend.copyto(dst, src)
numpy.testing.assert_array_equal(self._to_cpu(dst), self.src_data)
@attr.chainerx
def test_from_chx_native(self):
src = chainerx.array(self.src_data, device='native')
dst = self._get_dst()
backend.copyto(dst, src)
numpy.testing.assert_array_equal(self._to_cpu(dst), self.src_data)
@attr.chainerx
@attr.gpu
def test_from_chx_cuda(self):
src = chainerx.array(self.src_data, device='cuda:0')
dst = self._get_dst()
backend.copyto(dst, src)
numpy.testing.assert_array_equal(self._to_cpu(dst), self.src_data)
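# A hedged, minimal sketch (not part of the original test suite):
# backend.copyto writes ``src`` into ``dst`` in place; here both arrays
# live on NumPy.  ``_demo_copyto`` is a hypothetical helper name.
def _demo_copyto():
    src = numpy.arange(4, dtype=numpy.float32)
    dst = numpy.zeros_like(src)
    backend.copyto(dst, src)
    numpy.testing.assert_array_equal(dst, src)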
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32],
}))
class TestCopyToCPU(_TestCopyToBase, unittest.TestCase):
def _get_dst(self):
return self.dst_data.astype(self.dtype, copy=False)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32],
}))
@attr.gpu
class TestCopyToGPU(_TestCopyToBase, unittest.TestCase):
def _get_dst(self):
return cuda.cupy.array(self.dst_data, self.dtype)
@attr.multi_gpu(2)
def test_gpu_to_another_gpu(self):
src = cuda.cupy.array(self.src_data)
with cuda.get_device_from_id(1):
dst = self._get_dst()
backend.copyto(dst, src)
cuda.cupy.testing.assert_array_equal(dst, src)
@attr.ideep
class TestCopyToIDeep(_TestCopyToBase, unittest.TestCase):
def _get_dst(self):
dst = intel64.ideep.array(self.src_data)
assert isinstance(dst, intel64.mdarray)
return dst
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32],
}))
@attr.chainerx
class TestCopyToChxNative(_TestCopyToBase, unittest.TestCase):
def _get_dst(self):
return chainerx.array(self.dst_data, dtype=self.dtype, device='native')
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32],
}))
@attr.chainerx
@attr.gpu
class TestCopyToChxCuda(_TestCopyToBase, unittest.TestCase):
def _get_dst(self):
return chainerx.array(self.dst_data, dtype=self.dtype, device='cuda:0')
class TestCopyToError(unittest.TestCase):
def test_fail_on_invalid_src(self):
src = None
dst = numpy.zeros(1)
with self.assertRaises(TypeError):
backend.copyto(dst, src)
def test_fail_on_invalid_dst(self):
src = numpy.zeros(1)
dst = None
with self.assertRaises(TypeError):
backend.copyto(dst, src)
class TestGetArrayModule(unittest.TestCase):
def test_get_array_module_for_numpy_array(self):
xp = backend.get_array_module(numpy.array([]))
self.assertIs(xp, numpy)
assert xp is not cuda.cupy
assert xp is not chainerx
def test_get_array_module_for_numpy_variable(self):
xp = backend.get_array_module(chainer.Variable(numpy.array([])))
assert xp is numpy
assert xp is not cuda.cupy
assert xp is not chainerx
@attr.gpu
def test_get_array_module_for_cupy_array(self):
xp = backend.get_array_module(cuda.cupy.array([]))
assert xp is cuda.cupy
assert xp is not numpy
assert xp is not chainerx
@attr.gpu
def test_get_array_module_for_cupy_variable(self):
xp = backend.get_array_module(chainer.Variable(cuda.cupy.array([])))
assert xp is cuda.cupy
assert xp is not numpy
assert xp is not chainerx
@attr.chainerx
def test_get_array_module_for_chainerx_array(self):
xp = backend.get_array_module(chainerx.array([]))
assert xp is chainerx
assert xp is not numpy
assert xp is not cuda.cupy
@attr.chainerx
def test_get_array_module_for_chainerx_variable(self):
xp = backend.get_array_module(chainer.Variable(chainerx.array([])))
assert xp is chainerx
assert xp is not numpy
assert xp is not cuda.cupy
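# A hedged, minimal sketch (not part of the original test suite):
# get_array_module returns the array namespace that owns its argument.
# ``_demo_get_array_module`` is a hypothetical helper name.
def _demo_get_array_module():
    assert backend.get_array_module(numpy.zeros(3)) is numpy
    assert backend.get_array_module(chainer.Variable(numpy.zeros(3))) is numpy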
class TestGetDeviceFromArray(unittest.TestCase):
# This test only checks fallback case (for unrecognized arguments).
# Successful cases are tested in each backend's unit tests
# (placed in `backend_tests`).
def check_unrecognized(self, arg):
device = backend.get_device_from_array(arg)
assert device == backend.CpuDevice()
def test_unrecognized(self):
# Unrecognized arguments fall back to CpuDevice
self.check_unrecognized(numpy.int64(1))
self.check_unrecognized(None)
self.check_unrecognized(1)
self.check_unrecognized(())
self.check_unrecognized(object())
class TestDeviceSpec(unittest.TestCase):
"""Test for backend.get_device() and backend.using_device()"""
def check_device_spec_numpy(self, device_spec):
device = backend.get_device(device_spec)
assert isinstance(device, backend.CpuDevice)
assert device.xp is numpy
with backend.using_device(device_spec):
# TODO(niboshi): Test the Chainer default device
pass
def check_device_spec_cupy(self, device_spec, expected_device_id):
device = backend.get_device(device_spec)
assert isinstance(device, backend.GpuDevice)
assert isinstance(device.device, cuda.Device)
assert device.xp is cuda.cupy
assert device.device.id == expected_device_id
with backend.using_device(device_spec):
# TODO(niboshi): Test the Chainer default device
assert cuda.Device() == cuda.Device(expected_device_id)
def check_device_spec_chainerx(self, device_spec, expected_device_name):
device = backend.get_device(device_spec)
assert isinstance(device, backend.ChainerxDevice)
assert device.xp is chainerx
assert isinstance(device.device, chainerx.Device)
assert device.device.name == expected_device_name
with backend.using_device(device_spec):
# TODO(niboshi): Test the Chainer default device
assert (
chainerx.get_default_device()
== chainerx.get_device(expected_device_name))
def check_device_spec_intel64(self, device_spec):
device = backend.get_device(device_spec)
assert isinstance(device, backend.Intel64Device)
assert device.xp is numpy
with backend.using_device(device_spec):
# TODO(niboshi): Test the Chainer default device
pass
def check_invalid(self, device_spec):
with pytest.raises(Exception):
backend.get_device(device_spec)
with pytest.raises(Exception):
backend.using_device(device_spec)
def test_str_numpy(self):
self.check_device_spec_numpy('@numpy')
def test_legacy_int_numpy(self):
self.check_device_spec_numpy(-1)
def test_legacy_str_numpy(self):
self.check_device_spec_numpy('-1')
def test_module_numpy_device(self):
self.check_device_spec_numpy(backend.CpuDevice())
@attr.chainerx
def test_str_chainerx_backend(self):
self.check_device_spec_chainerx('native', 'native:0')
@attr.chainerx
def test_str_chainerx_device(self):
self.check_device_spec_chainerx('native:0', 'native:0')
@attr.gpu
def test_str_cupy_device(self):
self.check_device_spec_cupy('@cupy:0', 0)
@attr.gpu
def test_legacy_int_cupy_device(self):
self.check_device_spec_cupy(0, 0)
@attr.gpu
def test_legacy_str_cupy_device(self):
self.check_device_spec_cupy('0', 0)
@attr.multi_gpu(2)
def test_str_cupy_device_multi_gpu(self):
self.check_device_spec_cupy('@cupy:1', 1)
@attr.multi_gpu(2)
def test_legacy_int_cupy_device_multi_gpu(self):
self.check_device_spec_cupy(1, 1)
@attr.multi_gpu(2)
def test_legacy_str_cupy_device_multi_gpu(self):
self.check_device_spec_cupy('1', 1)
@attr.chainerx
def test_chainerx_device(self):
chainerx_device = chainerx.get_device('native:0')
self.check_device_spec_chainerx(chainerx_device, 'native:0')
@attr.gpu
def test_cuda_device(self):
cupy_device = cuda.Device(0)
self.check_device_spec_cupy(cupy_device, 0)
@attr.ideep
def test_str_intel64(self):
self.check_device_spec_intel64('@intel64')
def test_str_chainerx_invalid(self):
self.check_invalid('native:foo')
self.check_invalid('')
def test_str_module_invalid(self):
self.check_invalid('@foo')
self.check_invalid('@foo:0')
def test_str_cupy_invalid(self):
self.check_invalid('@cupy')
self.check_invalid('@cupy::0')
def test_str_numpy_invalid(self):
self.check_invalid('@numpy:')
self.check_invalid('@numpy:0')
self.check_invalid('@:numpy')
def test_tuple_invalid(self):
# tuple is no longer supported from Chainer
self.check_invalid(('native', 0))
def test_cuda_dummy_device_invalid(self):
self.check_invalid(cuda.DummyDevice)
@unittest.skipIf(
chainerx.is_available(), 'Only tested when ChainerX is not built')
def test_chx_device_spec_without_chx_available(self):
# If chainerx is not available, get_device() with unprefixed string
# should mention ChainerX unavailability in the error message.
with pytest.raises(RuntimeError, match=r'.*ChainerX.*'):
chainer.get_device('foo')
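# A hedged, minimal sketch (not part of the original test suite): the string
# spec '@numpy' exercised above resolves to a CpuDevice backed by NumPy.
# ``_demo_device_spec`` is a hypothetical helper name.
def _demo_device_spec():
    device = backend.get_device('@numpy')
    assert isinstance(device, backend.CpuDevice)
    assert device.xp is numpy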
class TestDevice(unittest.TestCase):
def test_repr_str_numpy(self):
device = chainer.get_device('@numpy')
assert str(device) == '@numpy'
@attr.chainerx
def test_repr_str_chainerx_device(self):
device = chainer.get_device('native:0')
assert str(device) == 'native:0'
@attr.gpu
def test_repr_str_cupy_device(self):
device = chainer.get_device('@cupy:0')
assert str(device) == '@cupy:0'
@attr.ideep
def test_repr_str_intel64_device(self):
device = chainer.get_device('@intel64')
assert str(device) == '@intel64'
def test_repr_numpy(self):
device = chainer.get_device('@numpy')
assert repr(device) == '<CpuDevice (numpy)>'
@attr.chainerx
def test_repr_chainerx_device(self):
device = chainer.get_device('native:0')
assert repr(device) == '<ChainerxDevice native:0>'
@attr.gpu
def test_repr_cupy_device(self):
device = chainer.get_device('@cupy:0')
assert repr(device) == '<GpuDevice (cupy):0>'
@attr.ideep
def test_repr_intel64_device(self):
device = chainer.get_device('@intel64')
assert repr(device) == '<Intel64Device>'
def test_eq_numpy(self):
assert backend.get_device('@numpy') == backend.get_device('@numpy')
assert backend.CpuDevice() == backend.get_device('@numpy')
# __ne__()
assert not backend.CpuDevice() != backend.get_device('@numpy')
@attr.gpu
def test_eq_cupy(self):
assert (backend.get_device('@cupy:0')
!= backend.get_device('@numpy'))
assert (backend.get_device('@cupy:0')
== backend.get_device('@cupy:0'))
assert (backend.get_device('@cupy:0')
!= backend.get_device('@cupy:1'))
@attr.chainerx
def test_eq_chainerx(self):
assert backend.get_device('native:0') == backend.get_device('native:0')
assert backend.get_device('native:0') != backend.get_device('native:1')
@attr.chainerx
@attr.gpu
def test_eq_chainerx_cupy(self):
assert (
backend.get_device('native:0')
!= backend.get_device('@cupy:0'))
class TestDeviceSend(unittest.TestCase):
def orig_numpy(self):
return numpy.ones((2, 3), numpy.float32)
def orig_cupy(self):
arr = cuda.to_gpu(numpy.ones((2, 3), numpy.float32))
assert isinstance(arr, cuda.ndarray)
return arr
def orig_chainerx(self, device_name):
return chainerx.ones((2, 3), numpy.float32, device=device_name)
def send_check_equal(self, orig, device_spec):
device = backend.get_device(device_spec)
converted = device.send(orig)
numpy.testing.assert_array_equal(
backend.CpuDevice().send(orig),
backend.CpuDevice().send(converted))
return converted
def test_numpy_to_numpy(self):
orig = self.orig_numpy()
converted = self.send_check_equal(orig, '@numpy')
assert converted is orig
@attr.gpu
def test_numpy_to_cupy(self):
orig = self.orig_numpy()
converted = self.send_check_equal(orig, '@cupy:0')
assert isinstance(converted, cuda.ndarray)
assert converted.device == cuda.Device(0)
@attr.chainerx
def test_numpy_to_chainerx(self):
orig = self.orig_numpy()
converted = self.send_check_equal(orig, 'native:0')
assert isinstance(converted, chainerx.ndarray)
assert converted.device.name == 'native:0'
# memory must be shared
orig[:] *= 2
numpy.testing.assert_array_equal(
orig, backend.CpuDevice().send(converted))
@attr.chainerx
@attr.gpu
def test_numpy_to_chainerx_cuda(self):
orig = self.orig_numpy()
converted = self.send_check_equal(orig, 'cuda:0')
assert isinstance(converted, chainerx.ndarray)
assert converted.device.name == 'cuda:0'
@attr.gpu
def test_cupy_to_numpy(self):
orig = self.orig_cupy()
converted = self.send_check_equal(orig, '@numpy')
assert isinstance(converted, numpy.ndarray)
@attr.gpu
def test_cupy_to_cupy(self):
orig = self.orig_cupy()
converted = self.send_check_equal(orig, '@cupy:0')
assert isinstance(converted, cuda.ndarray)
assert converted.device == orig.device
# memory must be shared
orig[:] *= 2
numpy.testing.assert_array_equal(
backend.CpuDevice().send(orig),
backend.CpuDevice().send(converted))
@attr.chainerx
@attr.gpu
def test_cupy_to_chainerx(self):
orig = self.orig_cupy()
converted = self.send_check_equal(orig, 'cuda:0')
assert isinstance(converted, chainerx.ndarray)
assert converted.device.name == 'cuda:0'
# memory must be shared
orig[:] *= 2
numpy.testing.assert_array_equal(
backend.CpuDevice().send(orig),
backend.CpuDevice().send(converted))
@attr.multi_gpu(2)
def test_cupy_to_cupy_multigpu(self):
orig = self.orig_cupy()
converted = self.send_check_equal(orig, '@cupy:1')
assert isinstance(converted, cuda.ndarray)
assert converted.device.id == 1
@attr.chainerx
@attr.gpu
def test_cupy_to_chainerx_native(self):
orig = self.orig_cupy()
converted = self.send_check_equal(orig, 'native:0')
assert isinstance(converted, chainerx.ndarray)
assert converted.device.name == 'native:0'
@attr.chainerx
@attr.multi_gpu(2)
def test_cupy_to_chainerx_multigpu(self):
orig = self.orig_cupy()
converted = self.send_check_equal(orig, 'cuda:1')
assert isinstance(converted, chainerx.ndarray)
assert converted.device.name == 'cuda:1'
@attr.chainerx
def test_chainerx_native_to_numpy(self):
orig = self.orig_chainerx('native:0')
converted = self.send_check_equal(orig, '@numpy')
assert isinstance(converted, numpy.ndarray)
# memory must be shared
converted[:] *= 2
numpy.testing.assert_array_equal(
backend.CpuDevice().send(orig),
backend.CpuDevice().send(converted))
@attr.chainerx
@attr.gpu
def test_chainerx_cuda_to_cupy(self):
orig = self.orig_chainerx('cuda:0')
converted = self.send_check_equal(orig, '@cupy:0')
assert isinstance(converted, cuda.ndarray)
assert converted.device.id == 0
# memory must be shared
converted[:] *= 2
numpy.testing.assert_array_equal(
backend.CpuDevice().send(orig),
backend.CpuDevice().send(converted))
@attr.chainerx
@attr.multi_gpu(2)
def test_chainerx_cuda_to_cupy_multigpu(self):
orig = self.orig_chainerx('cuda:0')
converted = self.send_check_equal(orig, '@cupy:1')
assert isinstance(converted, cuda.ndarray)
assert converted.device.id == 1
# memory must not be shared
converted_copy = converted.copy()
with cuda.Device(1):
converted[:] *= 2
numpy.testing.assert_array_equal(
backend.CpuDevice().send(orig),
backend.CpuDevice().send(converted_copy))
@attr.chainerx
@attr.gpu
def test_chainerx_cuda_to_numpy(self):
orig = self.orig_chainerx('cuda:0')
converted = self.send_check_equal(orig, '@numpy')
assert isinstance(converted, numpy.ndarray)
def test_numpy_to_numpy_with_device(self):
orig = self.orig_numpy()
self.send_check_equal(orig, backend.CpuDevice())
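# A hedged, minimal sketch (not part of the original test suite): Device.send
# returns the array unchanged when no transfer is needed, as checked above for
# NumPy to NumPy.  ``_demo_device_send`` is a hypothetical helper name.
def _demo_device_send():
    arr = numpy.ones((2, 3), numpy.float32)
    assert backend.get_device('@numpy').send(arr) is arr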
testing.run_module(__name__, __file__)
| 18,285
| 31.080702
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_runnable.py
|
import io
import os
import re
import unittest
from chainer import testing
class TestRunnable(unittest.TestCase):
def test_runnable(self):
cwd = os.path.dirname(__file__)
regex = re.compile(r'^test_.*\.py$')
for dirpath, dirnames, filenames in os.walk(cwd):
for filename in filenames:
if not regex.match(filename):
continue
path = os.path.join(dirpath, filename)
with io.open(path, encoding='utf-8') as f:
source = f.read()
self.assertIn('testing.run_module(__name__, __file__)',
source,
'''{0} is not runnable.
Call testing.run_module at the end of the test.'''.format(path))
testing.run_module(__name__, __file__)
| 825
| 28.5
| 71
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_function.py
|
import threading
import unittest
import mock
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
def make_array(start, shape, dtype):
size = numpy.product(shape, dtype='i')
a = numpy.arange(start, start + size)
a = a.reshape(shape)
a = a.astype(dtype, copy=False)
return a
@testing.parameterize(*testing.product({
'y_shape': [(4,), (0,), (2, 3), ()],
'x_shape': [(3,), (0,), (4, 1), ()],
}))
class TestFunction(unittest.TestCase):
def _get_method(self, prefix, gpu):
suffix = 'gpu' if gpu else 'cpu'
return getattr(self.f, prefix + '_' + suffix)
def setUp(self):
y_shape = self.y_shape
x_shape = self.x_shape
y1 = make_array(1, y_shape, numpy.float32)
y2 = make_array(2, y_shape, numpy.float32)
gx1 = make_array(1, x_shape, numpy.float32)
gx2 = None
gy1 = make_array(1, y_shape, numpy.float32)
gy2 = make_array(1, y_shape, numpy.float32)
f = chainer.Function()
f.check_type_forward = mock.MagicMock()
f.forward_cpu = mock.MagicMock(return_value=(y1, y2))
f.forward_gpu = mock.MagicMock()
f.backward_cpu = mock.MagicMock(return_value=(gx1, gx2))
f.backward_gpu = mock.MagicMock()
self.f = f
self.x1 = make_array(0, x_shape, numpy.float32)
self.x2 = make_array(0, x_shape, numpy.int32)
self.y1 = y1
self.y2 = y2
self.gx1 = gx1
self.gx2 = gx2
self.gy1 = gy1
self.gy2 = gy2
def tearDown(self):
# Set None to delete cuda array
self.f = None
self.y1 = None
self.y2 = None
self.gx1 = None
def setup_gpu(self, device=0):
self.x1 = cuda.to_gpu(self.x1, device)
self.x2 = cuda.to_gpu(self.x2, device)
self.y1 = cuda.to_gpu(self.y1, device)
self.y2 = cuda.to_gpu(self.y2, device)
self.gx1 = cuda.to_gpu(self.gx1, device)
self.gx2 = None
self.gy1 = cuda.to_gpu(self.gy1, device)
self.gy2 = cuda.to_gpu(self.gy2, device)
self.f.forward_gpu = mock.MagicMock(return_value=(self.y1, self.y2))
self.f.backward_gpu = mock.MagicMock(return_value=(self.gx1, self.gx2))
def check_forward(self, gpu):
y1, y2 = self.f.forward((self.x1, self.x2))
self.assertEqual(self.f.check_type_forward.call_count, 0)
self.assertEqual(self._get_method('forward', not gpu).call_count, 0)
self._get_method('forward', gpu).assert_called_once_with(
(self.x1, self.x2))
self.assertTrue((cuda.to_cpu(y1) == cuda.to_cpu(self.y1)).all())
self.assertTrue((cuda.to_cpu(y2) == cuda.to_cpu(self.y2)).all())
def test_forward_cpu(self):
self.check_forward(False)
@attr.gpu
def test_forward_gpu(self):
self.setup_gpu()
self.check_forward(True)
def check_backward(self, gpu):
gx1, gx2 = self.f.backward((self.x1, self.x2), (self.gy1, self.gy2))
self.assertEqual(self._get_method('backward', not gpu).call_count, 0)
self._get_method('backward', gpu).assert_called_once_with(
(self.x1, self.x2), (self.gy1, self.gy2))
self.assertTrue((cuda.to_cpu(gx1) == cuda.to_cpu(self.gx1)).all())
self.assertIsNone(gx2)
def test_backward_cpu(self):
self.check_backward(False)
@attr.gpu
def test_backward_gpu(self):
self.setup_gpu()
self.check_backward(True)
def check_check_type_forward(self):
self.assertEqual(self.f.check_type_forward.call_count, 1)
ts = self.f.check_type_forward.call_args[0][0]
self.assertIsInstance(ts, type_check.LightTypeInfoTuple)
self.assertEqual(len(ts), 2)
t1 = ts[0]
assert t1.shape == self.x_shape
assert t1.dtype == numpy.float32
t2 = ts[1]
assert t2.shape == self.x_shape
assert t2.dtype == numpy.int32
def check_call(self, check_backward=False):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
x1._node._rank = 1
x2._node._rank = 3
ys = self.f(x1, x2)
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
# rank is (maximum rank in xs) + 1
self.assertEqual(y.rank, 4)
self.assertIs(y.creator, self.f)
self.assertTrue(y.requires_grad)
self.assertIsInstance(y.creator.outputs, tuple)
if check_backward:
ys[0].creator_node.backward(
(0, 1),
(chainer.Variable(self.gy1), chainer.Variable(self.gy2)))
def test_call_cpu(self):
self.check_call()
@attr.gpu
def test_call_gpu(self):
self.setup_gpu()
self.check_call()
@attr.multi_gpu(2)
def test_call_another_gpu(self):
device = 1
self.setup_gpu(device)
def check_current_device(ret):
def meth(func_self, *args, **kwargs):
current_device = cuda.cupy.cuda.Device().id
# TODO(niboshi):
# This test fails with zero-sized arrays because CUDA device is
# not defined for such arrays.
# See: https://github.com/chainer/chainer/issues/3702
if not (self.y1.size == 0 or self.x1.size == 0):
assert current_device == device
return ret
return meth
self.f.forward = check_current_device((self.y1, self.y2))
self.f.backward = check_current_device((self.gx1, self.gx2))
self.check_call(check_backward=True)
def check_call_all_ndarray(self):
x1 = self.x1
x2 = self.x2
ys = self.f(x1, x2)
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
xp = backend.get_array_module(x1)
for y in ys:
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, xp.ndarray)
self.assertFalse(y.requires_grad)
def test_call_all_ndarray_cpu(self):
self.check_call_all_ndarray()
@attr.gpu
def test_call_all_ndarray_gpu(self):
self.setup_gpu()
self.check_call_all_ndarray()
def check_call_ndarray(self):
x1 = chainer.Variable(self.x1)
x2 = self.x2
x1._node._rank = 1
ys = self.f(x1, x2)
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
# rank is (maximum rank in xs) + 1
self.assertEqual(y.rank, 2)
self.assertIs(y.creator, self.f)
self.assertTrue(y.requires_grad)
self.assertIsInstance(y.creator.outputs, tuple)
def test_call_ndarray_cpu(self):
self.check_call_ndarray()
@attr.gpu
def test_call_ndarray_gpu(self):
self.setup_gpu()
self.check_call_ndarray()
def check_call_single_return_value(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
ret = self.f(x1, x2)
self.assertIsInstance(ret, chainer.Variable)
def test_call_single_return_value_cpu(self):
self.f.forward_cpu.return_value = (cuda.to_cpu(self.y1),)
self.check_call_single_return_value()
@attr.gpu
def test_call_single_return_value_gpu(self):
self.setup_gpu()
self.f.forward_gpu.return_value = (cuda.to_gpu(self.y1),)
self.check_call_single_return_value()
def _get_f(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
y1, y2 = self.f(x1, x2)
f = y1.creator
        # To test weak reference, return only x1 and y1.
# x2 and y2 are deleted by the garbage collector
return f, x1, y1
def test_unchain(self):
f, _x1, _y1 = self._get_f()
y1, y2 = f.outputs
f.unchain()
# As _y1 is alive, this weak ref is also alive
y1_ref = y1()
self.assertIsNotNone(y1_ref)
self.assertIsNone(y1_ref.creator)
        # This weak ref is dead after unchain
y2_ref = y2()
self.assertIsNone(y2_ref)
self.assertIsNone(f.inputs)
def test_label(self):
self.assertEqual(self.f.label, 'Function')
class TestFunctionBackwardIntegration(unittest.TestCase):
def test_backward(self):
x = chainer.Variable(numpy.array([1]), name='x')
y1 = F.identity(x)
y1.name = 'y1'
y2 = F.identity(x)
y2.name = 'y2'
z = y1 + y2
z.name = 'z'
z.grad = numpy.array([1])
z.backward(retain_grad=True)
self.assertEqual(y1.grad[0], 1)
self.assertEqual(y2.grad[0], 1)
self.assertEqual(x.grad[0], 2)
class TestFunctionInvalidType(unittest.TestCase):
def test_forward_invalid1(self):
class Function(chainer.Function):
def check_type_forward(self, in_types):
x_type, = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim >= 2,
)
def forward(self, inputs):
return inputs
f = Function()
# OK
v = chainer.Variable(numpy.random.randn(1, 5).astype(numpy.float32))
result = f(v)
assert isinstance(result, chainer.Variable)
# Incorrect dtype
# in py3, numpy dtypes are represented as class
msg = """\
Invalid operation is performed in: Function \\(Forward\\)
Expect: in_types\\[0\\]\\.dtype == <(type|class) 'numpy\\.float32'>
Actual: float64 \\!= <(type|class) 'numpy\\.float32'>"""
v = chainer.Variable(numpy.random.randn(1, 5))
with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
msg):
f(v)
# Incorrect dim
msg = """\
Invalid operation is performed in: Function \\(Forward\\)
Expect: in_types\\[0\\]\\.ndim >= 2
Actual: 1 < 2"""
v = chainer.Variable(numpy.random.randn(5).astype(numpy.float32))
with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
msg):
f(v)
@testing.parameterize(
{'return_value': (numpy.array([float('nan')], numpy.float32),),
'valid': False},
{'return_value': (numpy.array([1], numpy.int32),), 'valid': True},
)
class TestFunctionForwardDebug(unittest.TestCase):
def setUp(self):
self.original_debug = chainer.is_debug()
chainer.set_debug(True)
self.one = numpy.array([1], numpy.float32)
self.f = chainer.Function()
def tearDown(self):
chainer.set_debug(self.original_debug)
def check_debug_forward(self, x_data):
x = chainer.Variable(x_data)
if self.valid:
            # check that forward raises nothing
self.f(x)
else:
with self.assertRaises(RuntimeError):
self.f(x)
def test_debug_forward_cpu(self):
self.f.forward_cpu = mock.MagicMock(return_value=self.return_value)
self.check_debug_forward(self.one)
@attr.gpu
def test_debug_forward_gpu(self):
return_value = tuple(None if x is None else cuda.to_gpu(x)
for x in self.return_value)
self.f.forward_gpu = mock.MagicMock(return_value=return_value)
self.check_debug_forward(cuda.to_gpu(self.one))
@testing.parameterize(
{'return_value': (numpy.array(float('nan'), numpy.float32),),
'valid': False},
{'return_value': (None,), 'valid': True},
)
class TestFunctionBackwardDebug(unittest.TestCase):
def setUp(self):
self.original_debug = chainer.is_debug()
chainer.set_debug(True)
self.one = numpy.array(1, numpy.float32)
self.f = chainer.Function()
def tearDown(self):
chainer.set_debug(self.original_debug)
def check_debug_backward(self, *xs_data):
xs = [chainer.Variable(x) for x in xs_data]
y = self.f(*xs)
if self.valid:
            # check that backward raises nothing
y.backward()
else:
with self.assertRaises(RuntimeError):
y.backward()
def test_debug_backward_cpu(self):
self.f.forward_cpu = mock.MagicMock(return_value=(self.one,))
self.f.backward_cpu = mock.MagicMock(return_value=self.return_value)
input_value = (self.one,) * len(self.return_value)
self.check_debug_backward(*input_value)
@attr.gpu
def test_debug_backward_gpu(self):
self.f.forward_gpu = mock.MagicMock(
return_value=(cuda.to_gpu(self.one),))
return_value = tuple(None if x is None else cuda.to_gpu(x)
for x in self.return_value)
input_value = (cuda.to_gpu(self.one),) * len(self.return_value)
self.f.backward_gpu = mock.MagicMock(return_value=return_value)
self.check_debug_backward(*input_value)
class TestNoBackpropMode(unittest.TestCase):
def setUp(self):
self.x = chainer.Variable(numpy.array([1.], 'f'))
def test_no_backprop_mode(self):
y = self.x + 1
self.assertTrue(y.creator_node is not None)
with chainer.no_backprop_mode():
y = self.x + 1
self.assertTrue(y.creator_node is None)
y = self.x + 1
self.assertTrue(y.creator_node is not None)
def test_force_backprop_mode(self):
with chainer.no_backprop_mode():
with chainer.force_backprop_mode():
y = self.x + 1
self.assertTrue(y.creator_node is not None)
y = self.x + 1
self.assertTrue(y.creator_node is not None)
with chainer.force_backprop_mode():
y = self.x + 1
self.assertTrue(y.creator_node is not None)
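# A hedged, minimal sketch (not part of the original test suite): inside
# no_backprop_mode the graph is not recorded, so the result has no creator.
# ``_demo_no_backprop_mode`` is a hypothetical helper name.
def _demo_no_backprop_mode():
    x = chainer.Variable(numpy.array([1.], 'f'))
    with chainer.no_backprop_mode():
        y = x + 1
    assert y.creator_node is None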
class MyThread(threading.Thread):
def run(self):
x = chainer.Variable(numpy.array([1], dtype='f'))
with chainer.no_backprop_mode():
y = x + 1
self.creator_is_none = y.creator is None
class TestBackpropModeMultiThread(unittest.TestCase):
def test_multi_thread(self):
t = MyThread()
t.start()
t.join()
self.assertTrue(t.creator_is_none)
class FunctionWithRetaining(chainer.Function):
def forward(self, inputs):
self.retain_inputs([1])
self.retain_outputs([1])
return inputs
def backward(self, inputs, grad_outputs):
self.backward_inputs = inputs
return grad_outputs
class TestFunctionRetaining(unittest.TestCase):
def setUp(self):
inputs = [chainer.Variable(numpy.array([1], dtype=numpy.float32)),
chainer.Variable(numpy.array([1], dtype=numpy.float32))]
self.input_data = [x.data for x in inputs]
self.input_nodes = [x.node for x in inputs]
self.f1 = FunctionWithRetaining()
outputs = self.f1(*inputs)
outputs[0].grad = numpy.array([1], dtype=numpy.float32)
outputs[0].backward()
self.f1_output_data = [y.data for y in outputs]
self.f1_output_nodes = [y.node for y in outputs]
inputs = None # release non-retained inputs
def test_retain_inputs(self):
self.assertEqual([x.data for x in self.input_nodes],
[None, self.input_data[1]])
self.assertEqual(tuple(x.data for x in self.input_nodes),
self.f1.backward_inputs)
def test_retain_outputs_f1(self):
self.assertEqual([y.data for y in self.f1_output_nodes],
[None, self.f1_output_data[1]])
self.assertEqual(tuple(y.data for y in self.f1_output_nodes),
self.f1.output_data)
testing.run_module(__name__, __file__)
| 16,047
| 30.22179
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_computational_graph.py
|
import unittest
import numpy as np
import six
from chainer import computational_graph as c
from chainer import function
from chainer import testing
from chainer import variable
class MockFunction(function.Function):
def __init__(self, n_in, n_out):
self.n_in = n_in
self.n_out = n_out
def forward_cpu(self, xs):
assert len(xs) == self.n_in
return tuple(np.zeros((1, 2)).astype(np.float32)
for _ in six.moves.range(self.n_out))
def backward_cpu(self, xs, gys):
assert len(xs) == self.n_in
assert len(gys) == self.n_out
return tuple(np.zeros_like(xs).astype(np.float32)
for _ in six.moves.range(self.n_in))
def mock_function(xs, n_out):
return MockFunction(len(xs), n_out)(*xs)
def _check(self, outputs, node_num, edge_num):
g = c.build_computational_graph(outputs)
self.assertEqual(len(g.nodes), node_num)
self.assertEqual(len(g.edges), edge_num)
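# A hedged, minimal sketch (not part of the original test suite): a single
# chain x -> f -> y produces three graph nodes (x, f, y) joined by two edges.
# ``_demo_build_graph`` is a hypothetical helper name.
def _demo_build_graph():
    x = variable.Variable(np.zeros((1, 2), dtype=np.float32))
    y = 2 * x
    g = c.build_computational_graph((y,))
    assert len(g.nodes) == 3 and len(g.edges) == 2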
class TestGraphBuilder(unittest.TestCase):
# x-f-y-g-z
def setUp(self):
self.x = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.y = mock_function((self.x,), 1)
self.z = mock_function((self.y,), 1)
# x
def test_head_variable(self):
_check(self, (self.x, ), 1, 0)
def test_intermediate_variable(self):
# x-f-y
_check(self, (self.y, ), 3, 2)
def test_tail_variable(self):
# x-f-y-g-z
_check(self, (self.z, ), 5, 4)
def test_multiple_outputs(self):
_check(self, (self.x, self.y), 3, 2)
def test_multiple_outputs2(self):
_check(self, (self.x, self.z), 5, 4)
def test_multiple_outputs3(self):
_check(self, (self.y, self.z), 5, 4)
def test_multiple_outputs4(self):
_check(self, (self.x, self.y, self.z), 5, 4)
def test_nontuple_outputs(self):
_check(self, self.z, 5, 4)
def test_raise_array_outputs(self):
with self.assertRaises(TypeError):
c.build_computational_graph(self.z.array)
class TestGraphBuilder2(unittest.TestCase):
# x-f-y1
# \
# g-y2
def setUp(self):
self.x = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.y1 = mock_function((self.x,), 1)
self.y2 = mock_function((self.x,), 1)
def test_head_node(self):
_check(self, (self.x, ), 1, 0)
def test_tail_node(self):
_check(self, (self.y1, ), 3, 2)
def test_tail_node2(self):
_check(self, (self.y2, ), 3, 2)
def test_multiple_tails(self):
_check(self, (self.y1, self.y2), 5, 4)
class TestGraphBuilder3(unittest.TestCase):
# x-f-y1
# \
# y2
def setUp(self):
self.x = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.y1, self.y2 = mock_function((self.x,), 2)
def test_head_node(self):
_check(self, (self.x, ), 1, 0)
def test_tail_node(self):
_check(self, (self.y1, ), 3, 2)
def test_tail_node2(self):
_check(self, (self.y2, ), 3, 2)
def test_multiple_tails(self):
_check(self, (self.y1, self.y2), 4, 3)
class TestGraphBuilder4(unittest.TestCase):
# x1-f-y
# /
# x2
def setUp(self):
self.x1 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.x2 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.y = mock_function((self.x1, self.x2), 1)
def test_head_node1(self):
_check(self, (self.x1, ), 1, 0)
def test_head_node2(self):
_check(self, (self.x2, ), 1, 0)
def test_multiple_heads(self):
_check(self, (self.x1, self.x2), 2, 0)
def test_tail_node(self):
_check(self, (self.y, ), 4, 3)
class TestGraphBuilder5(unittest.TestCase):
def setUp(self):
self.x = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.y = 2 * self.x
self.f = self.y.creator_node
self.g = c.build_computational_graph((self.y,))
def test_edges(self):
self.assertEqual(len(self.g.edges), 2)
self.assertSetEqual(set(self.g.edges),
{(self.x.node, self.f), (self.f, self.y.node)})
def test_nodes(self):
self.assertEqual(len(self.g.nodes), 3)
self.assertSetEqual(set(self.g.nodes),
{self.x.node, self.f, self.y.node})
class TestGraphBuilder6(unittest.TestCase):
def setUp(self):
self.x1 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.x2 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.y = self.x1 + self.x2
self.f = self.y.creator_node
self.g = c.build_computational_graph((self.y,))
def test_edges(self):
self.assertEqual(len(self.g.edges), 3)
self.assertSetEqual(set(self.g.edges),
{(self.x1.node, self.f),
(self.x2.node, self.f),
(self.f, self.y.node)})
def test_nodes(self):
self.assertEqual(len(self.g.nodes), 4)
self.assertSetEqual(set(self.g.nodes),
{self.x1.node, self.x2.node, self.f, self.y.node})
class TestGraphBuilder7(unittest.TestCase):
def setUp(self):
self.x1 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.x2 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.x3 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.y = 0.3 * (self.x1 + self.x2) + self.x3
def test_tail_node(self):
_check(self, (self.y, ), 9, 8)
class TestGraphBuilderStylization(unittest.TestCase):
def setUp(self):
self.x1 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.x2 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.y = self.x1 + self.x2
self.f = self.y.creator_node
self.variable_style = {'label': 'variable_0', 'shape': 'octagon',
'style': 'filled', 'fillcolor': '#E0E0E0'}
self.function_style = {'label': 'function_0', 'shape': 'record',
'style': 'filled', 'fillcolor': '#6495ED'}
self.g = c.build_computational_graph(
(self.y,), variable_style=self.variable_style,
function_style=self.function_style)
def test_dotfile_content(self):
dotfile_content = self.g.dump()
for style in [self.variable_style, self.function_style]:
for key, value in style.items():
self.assertIn('{0}="{1}"'.format(key, value), dotfile_content)
def test_unsupported_format(self):
with self.assertRaises(NotImplementedError):
self.g.dump('graphml')
class TestGraphBuilderShowName(unittest.TestCase):
def setUp(self):
self.x1 = variable.Variable(
np.zeros((1, 2)).astype(np.float32), name='x1')
self.x2 = variable.Variable(
np.zeros((1, 2)).astype(np.float32), name='x2')
self.y = self.x1 + self.x2
self.y.name = 'y'
def test_show_name(self):
g = c.build_computational_graph((self.x1, self.x2, self.y))
dotfile_content = g.dump()
for var in [self.x1, self.x2, self.y]:
self.assertIn('label="%s:' % var.name, dotfile_content)
def test_dont_show_name(self):
g = c.build_computational_graph(
(self.x1, self.x2, self.y), show_name=False)
dotfile_content = g.dump()
for var in [self.x1, self.x2, self.y]:
self.assertNotIn('label="%s:' % var.name, dotfile_content)
class TestGraphBuilderRankdir(unittest.TestCase):
def setUp(self):
self.x1 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.x2 = variable.Variable(np.zeros((1, 2)).astype(np.float32))
self.y = self.x1 + self.x2
    def test_rankdir(self):
for rankdir in ['TB', 'BT', 'LR', 'RL']:
g = c.build_computational_graph((self.y,), rankdir=rankdir)
self.assertIn('rankdir=%s' % rankdir, g.dump())
    def test_rankdir_invalid(self):
self.assertRaises(ValueError,
c.build_computational_graph, (self.y,), rankdir='TL')
class TestGraphBuilderRemoveVariable(unittest.TestCase):
def setUp(self):
self.x1 = variable.Variable(np.zeros((1, 2)).astype('f'))
self.x2 = variable.Variable(np.zeros((1, 2)).astype('f'))
self.y = self.x1 + self.x2
self.f = self.y.creator_node
self.g = c.build_computational_graph((self.y,), remove_variable=True)
def test_remove_variable(self):
self.assertIn(self.f.label, self.g.dump())
self.assertNotIn(str(id(self.x1)), self.g.dump())
self.assertNotIn(str(id(self.x2)), self.g.dump())
testing.run_module(__name__, __file__)
| 8,849
| 30.161972
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_init_docstring.py
|
import importlib
import inspect
import pkgutil
import unittest
import chainer
from chainer import testing
def get_init_doc(klass):
for attr in inspect.classify_class_attrs(klass):
if attr.name == '__init__':
if attr.defining_class is klass:
return attr.object.__doc__
else:
# Ignore __init__ method inherited from a super class
return None
return None
class TestInitDocstring(unittest.TestCase):
"""Make sure classes do not have a docstring in their __init__ method."""
def check_init_docstring(self, mod, errors):
for name, value in inspect.getmembers(mod):
if not inspect.isclass(value):
continue
if 'chainer' not in value.__module__:
continue
init_doc = get_init_doc(value)
if init_doc == object.__init__.__doc__:
# Ignore doc string inherited from `object`
continue
if init_doc is not None:
                # Do not permit writing a docstring in `__init__`
errors.append((mod, value, init_doc))
def test_init_docstring_empty(self):
errors = []
root = chainer.__path__
for loader, modname, ispkg in pkgutil.walk_packages(root, 'chainer.'):
# Skip modules generated by protobuf.
if '_pb2' in modname:
continue
try:
mod = importlib.import_module(modname)
except ImportError:
continue
self.check_init_docstring(mod, errors)
if errors:
msg = ''
for mod, value, init_doc in errors:
msg += '{}.{} has __init__.__doc__:\n{}\n\n'.format(
mod.__name__, value, init_doc)
self.fail(msg)
testing.run_module(__name__, __file__)
| 1,890
| 29.015873
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_function_node.py
|
from __future__ import print_function
import threading
import unittest
import mock
import numpy
import pytest
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
# TODO(hvy): Remove the following import once testing.backend is imported
# in testing/__init__.py
import chainer.testing.backend
from chainer import utils
from chainer.utils import type_check
import chainerx
if chainerx.is_available():
import chainerx.testing
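# make_array builds a small deterministic test array on the given device,
# e.g. make_array(1, (2, 3), numpy.float32, device) sends
# [[1., 2., 3.], [4., 5., 6.]] as float32 to `device`.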
def make_array(start, shape, dtype, device):
size = numpy.product(shape, dtype='i')
a = numpy.arange(start, start + size)
a = a.reshape(shape)
a = a.astype(dtype, copy=False)
return device.send(a)
@testing.parameterize(*testing.product({
'y_shape': [(4,), (0,), (2, 3), ()],
'x_shape': [(3,), (0,), (4, 1), ()],
}))
class TestFunctionNode(unittest.TestCase):
def _get_method(self, prefix, gpu):
suffix = 'gpu' if gpu else 'cpu'
return getattr(self.f, prefix + '_' + suffix)
def _setup(self, device):
y_shape = self.y_shape
x_shape = self.x_shape
y1 = make_array(1, y_shape, numpy.float32, device)
y2 = make_array(2, y_shape, numpy.float32, device)
gx1 = chainer.Variable(
make_array(1, x_shape, numpy.float32, device))
gx2 = None
gy1 = make_array(1, y_shape, numpy.float32, device)
gy2 = make_array(1, y_shape, numpy.float32, device)
f = chainer.FunctionNode()
f.check_type_forward = mock.MagicMock()
f.forward_cpu = mock.MagicMock()
f.forward_gpu = mock.MagicMock()
f.backward = mock.MagicMock(return_value=(gx1, gx2))
self.f = f
self.x1 = make_array(0, x_shape, numpy.float32, device)
self.x2 = make_array(0, x_shape, numpy.int32, device)
self.y1 = y1
self.y2 = y2
self.gx1 = gx1
self.gx2 = gx2
self.gx1_orig = chainer.Variable(
make_array(3, x_shape, numpy.float32, device))
self.gx2_orig = chainer.Variable(
make_array(2, x_shape, numpy.float32, device))
self.gx1_accum = gx1 + self.gx1_orig
self.gy1 = gy1
self.gy2 = gy2
def tearDown(self):
        # Set to None to delete the cuda arrays
self.f = None
self.y1 = None
self.y2 = None
self.gx1 = None
def setup_cpu(self):
self._setup(backend.CpuDevice())
self.f.forward_cpu = mock.MagicMock(return_value=(self.y1, self.y2))
def setup_gpu(self):
self._setup(backend.GpuDevice.from_device_id(0))
self.f.forward_gpu = mock.MagicMock(return_value=(self.y1, self.y2))
def setup_chainerx(self, device_name='native:0'):
self._setup(chainer.get_device(device_name))
self.f.forward = mock.MagicMock(side_effect=lambda inputs: (
utils.force_array(inputs[0] * inputs[1]),
utils.force_array(inputs[0] + inputs[1])))
def check_forward(self, gpu):
y1, y2 = self.f.forward((self.x1, self.x2))
self.assertEqual(self.f.check_type_forward.call_count, 0)
self.assertEqual(self._get_method('forward', not gpu).call_count, 0)
self._get_method('forward', gpu).assert_called_once_with(
(self.x1, self.x2))
self.assertTrue((cuda.to_cpu(y1) == cuda.to_cpu(self.y1)).all())
self.assertTrue((cuda.to_cpu(y2) == cuda.to_cpu(self.y2)).all())
def test_forward_cpu(self):
self.setup_cpu()
self.check_forward(False)
@attr.gpu
def test_forward_gpu(self):
self.setup_gpu()
self.check_forward(True)
def check_check_type_forward(self):
self.assertEqual(self.f.check_type_forward.call_count, 1)
ts = self.f.check_type_forward.call_args[0][0]
self.assertIsInstance(ts, type_check.LightTypeInfoTuple)
self.assertEqual(len(ts), 2)
t1 = ts[0]
assert t1.shape == self.x_shape
assert t1.dtype == numpy.float32
t2 = ts[1]
assert t2.shape == self.x_shape
assert t2.dtype == numpy.int32
def check_apply(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
x1._node._rank = 1
x2._node._rank = 3
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
# rank is (maximum rank in xs) + 1
self.assertEqual(y.rank, 4)
self.assertIs(y.creator_node, self.f)
self.assertTrue(y.requires_grad)
self.assertIsInstance(y.creator_node.outputs, tuple)
def check_apply_chainerx(self):
x1 = chainer.Variable(self.x1)
# TODO(sonots): ChainerX does not support computing gradients for int32
x2 = chainer.Variable(self.x2, requires_grad=False)
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, chainerx.ndarray)
self.assertIs(y.data.device, self.x1.device)
self.assertTrue(y.requires_grad)
def test_apply_cpu(self):
self.setup_cpu()
self.check_apply()
@attr.gpu
def test_apply_gpu(self):
self.setup_gpu()
self.check_apply()
@attr.chainerx
def test_apply_chainerx_cpu(self):
self.setup_chainerx()
self.check_apply_chainerx()
@attr.chainerx
@attr.gpu
def test_apply_chainerx_gpu(self):
self.setup_chainerx('cuda:0')
self.check_apply_chainerx()
@attr.chainerx
@attr.multi_gpu(2)
def test_apply_chainerx_multi_gpu(self):
self.setup_chainerx('cuda:1')
self.check_apply_chainerx()
def check_apply_all_ndarray(self):
x1 = self.x1
x2 = self.x2
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
xp = backend.get_array_module(x1)
for y in ys:
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, xp.ndarray)
self.assertFalse(y.requires_grad)
def test_apply_all_ndarray_cpu(self):
self.setup_cpu()
self.check_apply_all_ndarray()
@attr.gpu
def test_apply_all_ndarray_gpu(self):
self.setup_gpu()
self.check_apply_all_ndarray()
@attr.chainerx
def test_apply_all_ndarray_chainerx_cpu(self):
self.setup_chainerx()
self.check_apply_all_ndarray()
@attr.chainerx
@attr.gpu
def test_apply_all_ndarray_chainerx_gpu(self):
self.setup_chainerx('cuda:0')
self.check_apply_all_ndarray()
def check_apply_ndarray(self):
x1 = chainer.Variable(self.x1)
x2 = self.x2
x1._node._rank = 1
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
# rank is (maximum rank in xs) + 1
self.assertEqual(y.rank, 2)
self.assertIs(y.creator_node, self.f)
self.assertTrue(y.requires_grad)
self.assertIsInstance(y.creator_node.outputs, tuple)
def check_apply_ndarray_chainerx(self):
x1 = chainer.Variable(self.x1)
x2 = self.x2
ys = self.f.apply((x1, x2))
self.assertEqual(len(ys), 2)
self.check_check_type_forward()
for y in ys:
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, chainerx.ndarray)
self.assertIs(y.data.device, self.x1.device)
self.assertTrue(y.requires_grad)
def test_apply_ndarray_cpu(self):
self.setup_cpu()
self.check_apply_ndarray()
@attr.gpu
def test_apply_ndarray_gpu(self):
self.setup_gpu()
self.check_apply_ndarray()
@attr.chainerx
def test_apply_ndarray_chainerx_cpu(self):
self.setup_chainerx()
self.check_apply_ndarray_chainerx()
@attr.chainerx
@attr.gpu
def test_apply_ndarray_chainerx_gpu(self):
self.setup_chainerx('cuda:0')
self.check_apply_ndarray_chainerx()
def check_apply_single_return_value(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
ret, = self.f.apply((x1, x2))
self.assertIsInstance(ret, chainer.Variable)
def check_apply_single_return_value_chainerx(self):
x1 = chainer.Variable(self.x1)
# TODO(sonots): ChainerX does not support computing gradients for int32
x2 = chainer.Variable(self.x2, requires_grad=False)
ret, = self.f.apply((x1, x2))
self.assertIsInstance(ret, chainer.Variable)
self.assertIsInstance(ret.data, chainerx.ndarray)
self.assertIs(ret.data.device, self.x1.device)
def test_apply_single_return_value_cpu(self):
self.setup_cpu()
self.f.forward_cpu.return_value = (self.y1,)
self.check_apply_single_return_value()
@attr.gpu
def test_apply_single_return_value_gpu(self):
self.setup_gpu()
self.f.forward_gpu.return_value = (self.y1,)
self.check_apply_single_return_value()
@attr.chainerx
def test_apply_single_return_value_chainerx_cpu(self):
self.setup_chainerx()
self.f.forward.side_effect = lambda inputs: (
utils.force_array(inputs[0] * inputs[1]),)
self.check_apply_single_return_value_chainerx()
@attr.chainerx
@attr.gpu
def test_apply_single_return_value_chainerx_gpu(self):
self.setup_chainerx('cuda:0')
self.f.forward.side_effect = lambda inputs: (
utils.force_array(inputs[0] * inputs[1]),)
self.check_apply_single_return_value_chainerx()
def _get_f(self):
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
y1, y2 = self.f.apply((x1, x2))
f = y1.creator_node
        # To test weak references, return only x1 and y1.
        # x2 and y2 are deleted by the garbage collector.
return f, x1, y1
def test_unchain(self):
self.setup_cpu()
f, _x1, _y1 = self._get_f()
y1, y2 = f.outputs
f.unchain()
# As _y1 is alive, this weak ref is also alive
y1_ref = y1()
self.assertIsNotNone(y1_ref)
self.assertIsNone(y1_ref.creator)
        # This weak ref is dead because of unchain()
y2_ref = y2()
self.assertIsNone(y2_ref)
self.assertIsNone(f.inputs)
def test_label(self):
self.setup_cpu()
self.assertEqual(self.f.label, 'FunctionNode')
class TestFunctionNodeMixChainerxAndXpArrays(unittest.TestCase):
class SimpleFunctionNode(chainer.FunctionNode):
def __init__(self, xp):
self.xp = xp
def forward(self, inputs):
x1, x2 = inputs
assert isinstance(x1, self.xp.ndarray)
assert isinstance(x2, self.xp.ndarray)
return x1 * x2,
def check_mix_xp(self, xp):
xp_x1 = xp.random.randn(2, 3).astype(numpy.float32)
xp_x2 = xp.random.randn(2, 3).astype(numpy.float32)
x2 = backend.to_chx(xp_x2)
fnode = self.SimpleFunctionNode(xp)
with self.assertRaises(TypeError):
fnode.apply((xp_x1, x2))
@attr.chainerx
def test_mix_numpy(self):
self.check_mix_xp(numpy)
@attr.chainerx
@attr.gpu
def test_mix_cupy(self):
self.check_mix_xp(cuda.cupy)
class TestFunctionNodeInvalidType(unittest.TestCase):
def test_forward_invalid1(self):
class FunctionNode(chainer.FunctionNode):
def check_type_forward(self, in_types):
x_type, = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim >= 2,
)
def forward(self, inputs):
return inputs
f = FunctionNode()
# OK
v = chainer.Variable(numpy.random.randn(1, 5).astype(numpy.float32))
result, = f.apply((v,))
assert isinstance(result, chainer.Variable)
# Incorrect dtype
        # In py3, numpy dtypes are represented as classes
msg = """\
Invalid operation is performed in: FunctionNode \\(Forward\\)
Expect: in_types\\[0\\]\\.dtype == <(type|class) 'numpy\\.float32'>
Actual: float64 \\!= <(type|class) 'numpy\\.float32'>"""
v = chainer.Variable(numpy.random.randn(1, 5))
with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
msg):
f.apply((v,))
# Incorrect dim
msg = """\
Invalid operation is performed in: FunctionNode \\(Forward\\)
Expect: in_types\\[0\\]\\.ndim >= 2
Actual: 1 < 2"""
v = chainer.Variable(numpy.random.randn(5).astype(numpy.float32))
with six.assertRaisesRegex(self, chainer.utils.type_check.InvalidType,
msg):
f.apply((v,))
class TestFunctionNodeForwardTypeCheck(unittest.TestCase):
def setUp(self):
self.x1 = numpy.random.rand(2, 3).astype(numpy.float32)
self.x2 = numpy.random.rand(2, 3).astype(numpy.float32)
def test_invalid_output_type(self):
class FunctionNode(chainer.FunctionNode):
def forward(self, inputs):
return object(),
f = FunctionNode()
x1 = chainer.Variable(self.x1)
with six.assertRaisesRegex(
self,
TypeError,
'forward output must be a tuple of ndarrays'):
f.apply((x1,))
@attr.gpu
def test_inconsistent_input_backends(self):
class FunctionNode(chainer.FunctionNode):
def forward(self, inputs):
return inputs
f = FunctionNode()
# Cause inconsistency between inputs
x1 = cuda.to_gpu(self.x1)
x1 = chainer.Variable(x1)
x2 = chainer.Variable(self.x2)
with self.assertRaises(TypeError):
f.apply((x1, x2))
@attr.gpu
def test_inconsistent_output_backends(self):
class FunctionNode(chainer.FunctionNode):
def forward(self, inputs):
# Cause inconsistency between outputs
return inputs[0], cuda.to_gpu(inputs[1])
f = FunctionNode()
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
with self.assertRaises(TypeError):
f.apply((x1, x2))
@testing.parameterize(
{'return_value': (numpy.array([float('nan')], numpy.float32),),
'valid': False},
{'return_value': (numpy.array([1], numpy.int32),), 'valid': True},
)
class TestFunctionNodeForwardDebug(unittest.TestCase):
def setUp(self):
self.original_debug = chainer.is_debug()
chainer.set_debug(True)
self.one = numpy.array([1], numpy.float32)
self.f = chainer.FunctionNode()
def tearDown(self):
chainer.set_debug(self.original_debug)
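    # In debug mode, the test expects apply() to reject a NaN float32 forward
    # output with RuntimeError, while the plain int32 output above passes.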
def check_debug_forward(self, x_data):
x = chainer.Variable(x_data)
if self.valid:
            # check that forward raises no error
self.f.apply((x,))
else:
with self.assertRaises(RuntimeError):
self.f.apply((x,))
def test_debug_forward_cpu(self):
self.f.forward_cpu = mock.MagicMock(return_value=self.return_value)
self.check_debug_forward(self.one)
@attr.gpu
def test_debug_forward_gpu(self):
return_value = tuple(None if x is None else cuda.to_gpu(x)
for x in self.return_value)
self.f.forward_gpu = mock.MagicMock(return_value=return_value)
self.check_debug_forward(cuda.to_gpu(self.one))
@testing.backend.inject_backend_tests(
None,
testing.product({'use_cuda': [True, False]}))
class TestFunctionNodeInvalidBackwardChecks(unittest.TestCase):
"""Tests FunctionNode.backward correctness checks"""
def setUp(self):
self.f = chainer.FunctionNode()
def _dummy_func(self, bwd_return_data):
# Create a dummy func that returns `bwd_return_data` in the
# `backward` method.
def one(xp):
return xp.array(1, numpy.float32)
class DummyFunc(chainer.FunctionNode):
def forward_cpu(self, inputs):
return one(numpy),
def forward_gpu(self, inputs):
return one(cuda.cupy),
def backward(self, indexes, grad_outputs):
return bwd_return_data
return DummyFunc()
def check_debug_backward_accumulate(
self, backend_config, f, xs_data, errors, initial_gxs=None):
        # `errors` is a dict whose keys are True or False, indicating the
        # debug mode in which to run the test, and whose values are either
        # None or a tuple of the expected exception type and error message
        # pattern.
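        # For example, {False: None, True: (RuntimeError, 'NaN is detected')}
        # expects the backward pass to fail only in debug mode.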
for debug_mode, error in errors.items():
def to_xp(arrs):
if backend_config.use_cuda:
return cuda.to_gpu(arrs)
else:
return arrs
# Convert arrays to GPU
xs_data = to_xp(xs_data)
if initial_gxs is not None:
initial_gxs = to_xp(initial_gxs)
# Call forward
xs = [chainer.Variable(x) for x in xs_data]
y, = f.apply(xs)
# Set initial input grads, if given
if initial_gxs is not None:
assert len(xs) == len(initial_gxs)
for x, gx in zip(xs, initial_gxs):
x.grad = gx
# Call backward & check error
with chainer.using_config('debug', debug_mode):
if error is None:
y.backward() # no error should be raised
else:
error_type, error_regex = error
with pytest.raises(error_type, match=error_regex):
y.backward()
def test_ok(self, backend_config):
self.check_debug_backward_accumulate(
backend_config,
f=self._dummy_func((
chainer.Variable(numpy.array([2.0], numpy.float32)),)),
xs_data=(numpy.array([1], numpy.float32),),
errors={False: None, True: None})
def test_gradients_has_nan(self, backend_config):
# Returns a gradient that has NaN value
self.check_debug_backward_accumulate(
backend_config,
f=self._dummy_func((chainer.Variable(numpy.array(
[float('nan')], numpy.float32)),)),
xs_data=(numpy.array([1], numpy.float32),),
errors={True: (RuntimeError,
'NaN is detected on backward computation')})
def test_invalid_number_of_gradients(self, backend_config):
# Returns more gradients than expected
self.check_debug_backward_accumulate(
backend_config,
f=self._dummy_func((
chainer.Variable(numpy.array([2.0], numpy.float32)),
chainer.Variable(numpy.array([1.0], numpy.float32)))),
xs_data=(numpy.array([1], numpy.float32),),
errors={True: (ValueError,
'number of gradients returned from backward is '
'incorrect')})
def test_invalid_zero_gradients(self, backend_config):
# Returns 0 gradients while 1 expected
self.check_debug_backward_accumulate(
backend_config,
f=self._dummy_func(()),
xs_data=(numpy.array([1], numpy.float32),),
errors={True: (ValueError,
'number of gradients returned from backward is '
'incorrect')})
def test_invalid_gradient_shape(self, backend_config):
# Returns gradient of incorrect shape
self.check_debug_backward_accumulate(
backend_config,
f=self._dummy_func((
chainer.Variable(
backend_config.xp.array([2, 3], numpy.float32)),)),
xs_data=(numpy.array([1], numpy.float32),),
errors={True: (ValueError,
'shape of gradients returned from backward is '
'incorrect')})
def test_invalid_gradient_type(self, backend_config):
# Incorrectly returns a gradient as ndarray instead of variable
self.check_debug_backward_accumulate(
backend_config,
f=self._dummy_func((
backend_config.xp.array([2.0], numpy.float32))),
xs_data=(numpy.array([1], numpy.float32),),
errors={True: (ValueError,
'type of gradients returned from backward is '
'incorrect')})
def test_invalid_gradient_dtype(self, backend_config):
# Incorrectly returns a gradient with incorrect dtype, compared to
# initially set gradients.
self.check_debug_backward_accumulate(
backend_config,
f=self._dummy_func((
chainer.Variable(
backend_config.xp.array([2.0], numpy.int64)),)),
xs_data=(numpy.array([1], numpy.float32),),
initial_gxs=(numpy.array([1], numpy.float32),),
errors={True: (ValueError,
'dtype of gradients returned from backward is '
'incorrect')})
class TestNoBackpropMode(unittest.TestCase):
def setUp(self):
self.x = chainer.Variable(numpy.array([1.], 'f'))
def test_no_backprop_mode(self):
y = self.x + 1
self.assertTrue(y.creator_node is not None)
with chainer.no_backprop_mode():
y = self.x + 1
self.assertTrue(y.creator_node is None)
y = self.x + 1
self.assertTrue(y.creator_node is not None)
def test_force_backprop_mode(self):
with chainer.no_backprop_mode():
with chainer.force_backprop_mode():
y = self.x + 1
self.assertTrue(y.creator_node is not None)
y = self.x + 1
self.assertTrue(y.creator_node is not None)
with chainer.force_backprop_mode():
y = self.x + 1
self.assertTrue(y.creator_node is not None)
@attr.chainerx
def test_backprop_mode_affects_chainerx(self):
# chainer.{no,force}_backprop_mode should affect chainerx's
# counterpart.
assert chainerx.is_backprop_required()
# nobp
with chainer.no_backprop_mode():
assert not chainerx.is_backprop_required()
# nobp > forcebp
with chainer.force_backprop_mode():
assert chainerx.is_backprop_required()
# nobp > nobp
with chainer.no_backprop_mode():
assert not chainerx.is_backprop_required()
assert chainerx.is_backprop_required()
# forcebp
with chainer.force_backprop_mode():
assert chainerx.is_backprop_required()
# forcebp > forcebp
with chainer.force_backprop_mode():
assert chainerx.is_backprop_required()
# forcebp > nobp
with chainer.no_backprop_mode():
assert not chainerx.is_backprop_required()
assert chainerx.is_backprop_required()
class MyThread(threading.Thread):
def run(self):
x = chainer.Variable(numpy.array([1], dtype='f'))
with chainer.no_backprop_mode():
y = x + 1
self.creator_is_none = y.creator_node is None
class TestBackpropModeMultiThread(unittest.TestCase):
def test_multi_thread(self):
t = MyThread()
t.start()
t.join()
self.assertTrue(t.creator_is_none)
class FunctionNodeWithRetaining(chainer.FunctionNode):
def __init__(self, input_indices, output_indices):
self.input_indices = input_indices
self.output_indices = output_indices
def forward(self, inputs):
self.retain_inputs(self.input_indices)
self.retain_outputs(self.output_indices)
return inputs
def backward(self, _, grad_outputs):
self.retained_backward_inputs = self.get_retained_inputs()
self.retained_backward_outputs = self.get_retained_outputs()
return grad_outputs
@testing.backend.inject_backend_tests(
None,
[
{},
{'use_cuda': True},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
])
class TestFunctionNodeRetaining(unittest.TestCase):
def test_retain(self, backend_config):
xp = backend_config.xp
input_arrs = backend_config.get_array([
numpy.array([2], dtype=numpy.float32),
numpy.array([-1], dtype=numpy.float32)])
inputs = [
chainer.Variable(input_arrs[0]),
chainer.Variable(input_arrs[1], requires_grad=False)]
input_arrays = [x.array for x in inputs]
if xp is not chainerx:
input_nodes = [x.node for x in inputs]
f = FunctionNodeWithRetaining([1], [0, 1])
outputs = f.apply(inputs)
outputs[0].grad = backend_config.get_array(
numpy.array([1], dtype=numpy.float32))
outputs[0].backward()
output_arrays = [y.array for y in outputs]
inputs = None # release non-retained inputs
assert len(f.retained_backward_inputs) == 1
assert len(f.retained_backward_outputs) == 2
if xp is not chainerx:
assert f.retained_backward_inputs[0].node is input_nodes[1]
xp.testing.assert_array_equal(
f.retained_backward_inputs[0].array, input_arrays[1])
xp.testing.assert_array_equal(
f.retained_backward_outputs[0].array, output_arrays[0])
xp.testing.assert_array_equal(
f.retained_backward_outputs[1].array, output_arrays[1])
def check_no_retain(self, backend_config, skip_call):
# This test ensures get_retained_{in,out}puts returns () if no
# input/output is retained.
        # skip_call: If True, retain_{in,out}puts() is not called.
class MyFunc(chainer.FunctionNode):
backward_called = 0
def forward(self, inputs):
x, = inputs
if not skip_call:
self.retain_outputs(())
self.retain_inputs(())
return x * 3,
def backward(self, input_indices, grad_outputs):
self.backward_called += 1
assert self.get_retained_outputs() == ()
assert self.get_retained_inputs() == ()
gy, = grad_outputs
return gy * 3,
x_arr = backend_config.get_array(numpy.array([1, 2], numpy.float32))
x = chainer.Variable(x_arr, requires_grad=True)
func = MyFunc()
y, = func.apply((x,))
y.grad = backend_config.get_array(numpy.array([1, 1], numpy.float32))
y.backward()
assert func.backward_called == 1
def test_no_retain(self, backend_config):
self.check_no_retain(backend_config, False)
self.check_no_retain(backend_config, True)
def _get_value(x):
if isinstance(x, chainer.Variable):
return x.data
return x
class TestGradTypeCheck(unittest.TestCase):
def test_type_check(self):
x = chainer.Variable(numpy.random.uniform(-1, 1, (2, 3)).astype('f'))
y = x * x
gx = chainer.Variable(numpy.random.uniform(-1, 1, (2, 3)).astype('f'))
gy = chainer.Variable(numpy.random.uniform(-1, 1, (2, 3)).astype('f'))
chainer.grad([y], [x], [gx], [gy])
chainer.grad((y,), (x,), (gx,), (gy,))
with self.assertRaises(TypeError):
chainer.grad(y, [x], [gx], [gy])
with self.assertRaises(TypeError):
chainer.grad([y], x, [gx], [gy])
with self.assertRaises(TypeError):
chainer.grad([y], [x], gx, [gy])
with self.assertRaises(TypeError):
chainer.grad([y], [x], [gx], gy)
class TestGradValueCheck(unittest.TestCase):
def test_length_check(self):
x = chainer.Variable(numpy.array(3, numpy.float32))
y = chainer.functions.identity(x)
with self.assertRaises(ValueError):
chainer.grad([y], [x], [], [None])
with self.assertRaises(ValueError):
chainer.grad([y], [x], [None, None], [None])
with self.assertRaises(ValueError):
chainer.grad([y], [x], [None], [])
with self.assertRaises(ValueError):
chainer.grad([y], [x], [None], [None, None])
class GradTestBase(object):
shape = 3,
x_names = ()
y_names = ()
loss_scale = None
extend_graph_x = False
extend_graph_y = False
def _init_attrs(self, names):
ret = []
for name in names:
v = chainer.Variable(
numpy.random.randint(-4, 6, self.shape).astype('f'), name=name)
ret.append(v)
setattr(self, name, v)
return ret
def _init_ones(self, names):
ret = []
for name in names:
v = chainer.Variable(numpy.ones(self.shape, dtype='f'))
ret.append(v)
setattr(self, name, v)
return ret
@staticmethod
def _get_value(x):
if isinstance(x, chainer.Variable):
return x.data
return x
@staticmethod
def _to_grad_names(names):
return ['g%s' % name for name in names]
def setUp(self):
self.xs = self._init_attrs(self.x_names)
self.gxs = self._init_attrs(self._to_grad_names(self.x_names))
self.gys = self._init_attrs(self._to_grad_names(self.y_names))
if self.loss_scale is not None:
self._init_ones(self._to_grad_names(self.y_names))
self.gys = None
def use_device(self, device):
for value in six.itervalues(self.__dict__):
if isinstance(value, chainer.Variable):
value.to_device(device)
def forward(self):
raise NotImplementedError
def expected_grad(self):
raise NotImplementedError
def expected_double_grad(self):
raise NotImplementedError
def _print_variables(self, name, vs):
print('{}: '.format(name), end='')
print(*(self._get_value(v) for v in vs), sep=', ')
def _print_inputs(self):
self._print_variables('xs ', self.xs)
self._print_variables('gxs ', self.gxs)
self._print_variables('gys ', self.gys)
def check_grad(self):
self.forward()
ys = [getattr(self, name) for name in self.y_names]
if self.extend_graph_y:
self._ys = [v * 1. for v in ys]
# graph_x extension should be done here
# to avoid chainer/chainerx mixed graph
if self.extend_graph_x:
for v in self.xs:
v *= 1.
gxs = chainer.grad(ys, self.xs, self.gys, self.gxs,
loss_scale=self.loss_scale)
expected = self.expected_grad()
for i, gx in enumerate(self.gxs):
expected[i] += gx
self.assertEqual(len(gxs), len(expected))
try:
for a, e in zip(gxs, expected):
testing.assert_allclose(self._get_value(a), self._get_value(e))
except Exception:
self._print_inputs()
self._print_variables('gxs (actual) ', gxs)
self._print_variables('gxs (expected)', expected)
raise
def test_grad(self, backend_config):
self.use_device(backend_config.device)
self.check_grad()
def check_double_grad(self):
self.forward()
ys = [getattr(self, name) for name in self.y_names]
gxs = chainer.grad(ys, self.xs, self.gys, self.gxs,
enable_double_backprop=True,
loss_scale=self.loss_scale)
y = sum(gxs)
ggxs = chainer.grad([y], self.xs)
expected = self.expected_double_grad()
self.assertEqual(len(ggxs), len(expected))
try:
for a, e in zip(ggxs, expected):
testing.assert_allclose(self._get_value(a), self._get_value(e))
except Exception:
self._print_inputs()
self._print_variables('gxs ', gxs)
self._print_variables('ggxs (actual) ', ggxs)
self._print_variables('ggxs (expected)', expected)
raise
def test_double_grad(self, backend_config):
self.use_device(backend_config.device)
self.check_double_grad()
@testing.parameterize(*testing.product({
'loss_scale': [None, 1, 10],
}))
@testing.backend.inject_backend_tests(
None,
[
{},
{'use_ideep': 'always'},
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
]
)
class TestGradSimple(GradTestBase, unittest.TestCase):
x_names = 'x',
y_names = 'y',
def forward(self):
self.y = self.x * self.x
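    # Since y = x * x, dy/dx = 2 * x: the expected first-order gradient is
    # 2 * x * gy (times loss_scale when one is given), and differentiating
    # it again with respect to x gives 2 * gy.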
def expected_grad(self):
grad = 2 * self.x * self.gy
if self.loss_scale is not None:
grad *= self.loss_scale
return [grad]
def expected_double_grad(self):
ggrad = 2 * self.gy
if self.loss_scale is not None:
ggrad *= self.loss_scale
return [ggrad]
@testing.parameterize(*testing.product({
'loss_scale': [None, 1, 1.5, 2.5, 10],
}))
@testing.backend.inject_backend_tests(
None,
[
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestGradSimpleChainerX(GradTestBase, unittest.TestCase):
x_names = 'x',
y_names = 'y',
def forward(self):
self.y = self.x * self.x
def expected_grad(self):
grad = 2 * self.x * self.gy
return [grad]
def expected_double_grad(self):
ggrad = 2 * self.gy
return [ggrad]
@testing.parameterize(*testing.product({
'extend_graph_x': [False, True],
'extend_graph_y': [False, True],
}))
@testing.backend.inject_backend_tests(
None,
[
{},
{'use_ideep': 'always'},
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestGradComplex(GradTestBase, unittest.TestCase):
x_names = 'x1', 'x2'
y_names = 'y1', 'y2'
def forward(self):
self.z = self.x1 * self.x1
self.y1 = self.z + self.x1 * self.x2 + self.x2
self.y2 = self.z + self.y1
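    # Here y1 = x1**2 + x1*x2 + x2 and y2 = 2*x1**2 + x1*x2 + x2, so
    # dy1/dx1 = 2*x1 + x2, dy2/dx1 = 4*x1 + x2 and
    # dy1/dx2 = dy2/dx2 = x1 + 1, which is what expected_grad computes below.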
def expected_grad(self):
dz_dx = 2 * self.x1
dy1_dx = self.gy1 + self.gy2
return [dy1_dx * (dz_dx + self.x2) + self.gy2 * dz_dx,
dy1_dx * (self.x1 + 1)]
def expected_double_grad(self):
dy1_dx = self.gy1 + self.gy2
return [3 * dy1_dx + 2 * self.gy2, dy1_dx]
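# ExpPair returns exp(x) twice and retains both outputs; its backward sums
# g * exp(x) over whichever output gradients are not None, since
# d(exp(x))/dx = exp(x). The test below relies on this to drop one output
# and still backprop through the other.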
class ExpPair(chainer.FunctionNode):
def forward(self, inputs):
x, = inputs
xp = backend.get_array_module(x)
self.retain_outputs((0, 1))
return xp.exp(x), xp.exp(x)
def backward(self, target_input_indexes, grad_outputs):
return sum([
g * exp
for g, exp in zip(grad_outputs, self.get_retained_outputs())
if g is not None
]),
def exp_pair(x):
return ExpPair().apply((x,))
@testing.parameterize(*testing.product({
'keep_y2': [False, True],
}))
@testing.backend.inject_backend_tests(
None,
[
{},
{'use_ideep': 'always'},
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestGradDelRetainedOutput(GradTestBase, unittest.TestCase):
x_names = 'x1',
y_names = 'y1',
def forward(self):
self.y1, y2 = exp_pair(self.x1)
if self.keep_y2:
self.y2 = y2
def expected_grad(self):
return [self.gy1 * self.y1]
def expected_double_grad(self):
return [self.gy1 * self.y1]
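# ExpAndExpm1 returns (exp(x), expm1(x)) but retains only exp(x); because
# d(expm1(x))/dx is also exp(x), both output gradients contribute g * exp(x)
# in backward through the single retained output.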
class ExpAndExpm1(chainer.FunctionNode):
def forward(self, inputs):
x, = inputs
xp = backend.get_array_module()
y0 = xp.exp(x)
y1 = xp.expm1(x)
self.retain_outputs((0,))
return y0, y1
def backward(self, target_input_indexes, grad_outputs):
g0, g1 = grad_outputs
y0, = self.get_retained_outputs()
gx = []
if g0 is not None:
gx.append(g0 * y0)
if g1 is not None:
gx.append(g1 * y0)
return chainer.functions.add(*gx),
def exp_and_expm1(x):
return ExpAndExpm1().apply((x,))
class TestGradDelRetainedOutput2(unittest.TestCase):
def test_retain_output(self):
xp = numpy
x_array = xp.random.randn(3)
y1_grad = xp.random.randn(3)
x_grad_grad = xp.random.randn(3)
x = chainer.Variable(x_array, name='x')
y0, y1 = exp_and_expm1(x)
del y0
# (x: Variable) requires grad
# (y1_grad: ndarray) does not require grad
gx, = chainer.grad([y1], [x], [y1_grad], enable_double_backprop=True)
# assert gx == exp(x) * y1_grad
xp.testing.assert_allclose(
gx.array,
xp.exp(x.array) * y1_grad)
gx_, = chainer.grad([gx], [x], [x_grad_grad])
xp.testing.assert_allclose(
gx_.array,
gx.array * x_grad_grad)
class TestUnchainSplitGrad(unittest.TestCase):
def test_unchain_split(self):
x = chainer.Variable(numpy.arange(4).astype('f').reshape(2, 2))
h0, h1 = chainer.functions.split_axis(x, [1], axis=0)
y = chainer.functions.sum(h0)
z = chainer.functions.sum(h1)
w = y + z
h0.unchain()
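        # Unchaining h0 cuts the path x -> h0 -> y, so chainer.grad([y], [x])
        # yields no gradient (None), while the h1 branch still reaches x.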
dy_dh0 = numpy.array([[1., 1.]])
dz_dh1 = numpy.array([[1., 1.]])
dy_dx = None
dz_dx = numpy.array([[0., 0.], [1., 1.]])
dw_dx = numpy.array([[0., 0.], [1., 1.]])
testing.assert_allclose(chainer.grad([y], [h0])[0].array, dy_dh0)
testing.assert_allclose(chainer.grad([z], [h1])[0].array, dz_dh1)
assert chainer.grad([y], [x])[0] is dy_dx
testing.assert_allclose(chainer.grad([z], [x])[0].array, dz_dx)
testing.assert_allclose(chainer.grad([w], [x])[0].array, dw_dx)
class TestGradV3Compat1(unittest.TestCase):
def _var(self, val):
return chainer.Variable(numpy.array(val, numpy.float32))
def check(self, option, grads_before, grads_after):
vs = []
v = self._var(0.5)
for _ in range(4):
vs.append(v)
v += v
vs.append(v)
v *= 1.
_, x1, _, x2, _, y1, _, y2 = vs
gx1 = self._var(1000.)
gx2 = self._var(100.)
gy1 = self._var(10.)
gy2 = self._var(1.)
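        # Every addition doubles the value, so x1=1, x2=2, y1=4 and y2=8.
        # Per the expected values below, the gradient reaching x2 is
        # gy1*2 + gy2*4 = 24, plus the initial gx2=100 giving 124, and that
        # 124 keeps flowing down to x1 as 248, plus gx1=1000 giving 1248.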
for v, g in zip(vs, grads_before):
if g is not None:
v.grad_var = self._var(g)
grads = chainer.grad(
[y1, y2], [x1, x2], [gy1, gy2], [gx1, gx2], **option)
numpy.testing.assert_allclose(grads[0].array, 1248.)
numpy.testing.assert_allclose(grads[1].array, 124.)
for v, ans in zip(vs, grads_after):
if ans is None:
self.assertIsNone(v.grad)
else:
numpy.testing.assert_allclose(v.grad, ans)
def test_no_option(self):
self.check({}, [None] * 8, [None] * 8)
self.check({}, [-1.] * 8, [-1.] * 8)
def test_set_grad(self):
self.check(
{'set_grad': True},
[None] * 8,
[None, 1248., None, 124., None, None, None, None])
self.check(
{'set_grad': True},
[-1.] * 8,
[-1., 1248., -1., 124., -1., -1., -1., -1.])
def test_retain_grad(self):
self.check(
{'retain_grad': True},
[None] * 8,
[None, 1248., 248., 124., 24., 12., 2., 1.]
# Before v5, the result was
# [None, 1248., 248., 124., 24., 12., 2., None]
)
self.check(
{'retain_grad': True},
[-1.] * 8,
[-1., 1248., 248., 124., 24., 12., 2., 1.]
# Before v5, the result was
# [-1., 1248., 248., 124., 24., 12., 2., -1.]
)
@attr.chainerx
class TestFunctionNodeBackwardChainerx(unittest.TestCase):
class SimpleFunctionNode(chainer.FunctionNode):
def __init__(self, backward_call_callback):
self.backward_call_callback = backward_call_callback
def forward(self, inputs):
return tuple([2 * x for x in inputs])
def backward(self, indexes, grad_outputs):
self.backward_call_callback({
'indexes': indexes, 'grad_outputs': grad_outputs})
gxs = []
for i_in in indexes:
gx = 2 * grad_outputs[i_in]
gxs.append(gx)
return gxs
def test_backward(self):
shape = (2, 3)
dtype = numpy.float32
x1 = chainerx.full(shape, 3, dtype)
x2 = chainerx.full(shape, 5, dtype).require_grad()
gx2_expected = numpy.full(shape, 2, dtype)
backward_call_args = []
def backward_call_callback(call_arg):
backward_call_args.append(call_arg)
# forward
func = self.SimpleFunctionNode(backward_call_callback)
y1, y2 = func.apply((x1, x2))
del func
assert y1.requires_grad
assert y2.requires_grad
# backward
y2.backward()
# check backward call arguments
assert len(backward_call_args) == 1
call_arg, = backward_call_args
assert isinstance(call_arg['indexes'], tuple)
assert call_arg['indexes'] == (1,)
assert isinstance(call_arg['grad_outputs'], tuple)
assert len(call_arg['grad_outputs']) == 2
assert call_arg['grad_outputs'][0] is None
chainerx.testing.assert_array_equal_ex(
call_arg['grad_outputs'][1].array, numpy.full(shape, 1, dtype),
strides_check=False)
# check grads
chainerx.testing.assert_array_equal_ex(
x2.grad, gx2_expected, strides_check=False)
assert not x2.grad.is_backprop_required()
with pytest.raises(chainerx.ChainerxError):
x1.grad
@attr.gpu
def test_backward_default_device(self):
        # The default device in backward should be determined by the arrays;
        # otherwise, creation routines in backward do not create new arrays
        # on the proper device.
device = chainerx.get_device('cuda:0')
shape = (2, 3)
dtype = numpy.float32
x1 = chainerx.full(shape, 3, dtype, device=device)
x2 = chainerx.full(shape, 5, dtype, device=device).require_grad()
backward_call_new_array = []
def backward_call_callback(call_arg):
backward_call_new_array.append(chainerx.empty(shape, dtype))
with chainerx.using_device('native:0'):
# forward
func = self.SimpleFunctionNode(backward_call_callback)
y1, y2 = func.apply((x1, x2))
# backward
y2.backward()
assert backward_call_new_array[0].device is device
testing.run_module(__name__, __file__)
| 43,917
| 30.550287
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_init.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
class TestUseCuDNN(unittest.TestCase):
@attr.cudnn
def test_valid_case_combination(self):
with chainer.using_config('use_cudnn', 'always'):
self.assertTrue(chainer.should_use_cudnn('==always'))
self.assertTrue(chainer.should_use_cudnn('>=auto'))
with chainer.using_config('use_cudnn', 'auto'):
self.assertFalse(chainer.should_use_cudnn('==always'))
self.assertTrue(chainer.should_use_cudnn('>=auto'))
with chainer.using_config('use_cudnn', 'never'):
self.assertFalse(chainer.should_use_cudnn('==always'))
self.assertFalse(chainer.should_use_cudnn('>=auto'))
@unittest.skipIf(cuda.cudnn_enabled, 'cudnn unavailable')
def test_no_cudnn_available(self):
with chainer.using_config('use_cudnn', 'always'):
self.assertFalse(chainer.should_use_cudnn('==always'))
self.assertFalse(chainer.should_use_cudnn('>=auto'))
@attr.cudnn
def test_invalid_level(self):
self.assertRaises(ValueError, chainer.should_use_cudnn, '==auto')
@attr.cudnn
def test_invalid_config(self):
with chainer.using_config('use_cudnn', True):
self.assertRaises(ValueError, chainer.should_use_cudnn, '>=auto')
with chainer.using_config('use_cudnn', False):
self.assertRaises(ValueError, chainer.should_use_cudnn, '>=auto')
with chainer.using_config('use_cudnn', 'on'):
self.assertRaises(ValueError, chainer.should_use_cudnn, '>=auto')
@attr.cudnn
def test_higher_version_required(self):
with chainer.using_config('use_cudnn', 'always'):
self.assertFalse(chainer.should_use_cudnn(
'>=auto', cuda.cuda.cudnn.getVersion() + 1))
class TestDtype(unittest.TestCase):
def test_numpy_dtypes(self):
for dtype in (numpy.float16, numpy.float32, numpy.float64):
with chainer.using_config('dtype', dtype):
self.assertEqual(chainer.get_dtype(), numpy.dtype(dtype))
def test_specified_dtype(self):
with chainer.using_config('dtype', numpy.float64):
dtype = numpy.float16
self.assertEqual(chainer.get_dtype(dtype), numpy.dtype(dtype))
def test_mixed16_dtype(self):
with chainer.using_config('dtype', chainer.mixed16):
self.assertEqual(chainer.get_dtype(),
numpy.dtype(numpy.float16))
self.assertEqual(chainer.get_dtype(map_mixed16=numpy.float32),
numpy.dtype(numpy.float32))
def test_specified_mixed16_dtype(self):
with chainer.using_config('dtype', numpy.float64):
self.assertEqual(chainer.get_dtype(chainer.mixed16),
numpy.dtype(numpy.float16))
self.assertEqual(
chainer.get_dtype(chainer.mixed16, map_mixed16=numpy.float32),
numpy.dtype(numpy.float32))
testing.run_module(__name__, __file__)
| 3,140
| 36.392857
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_function_hook.py
|
import unittest
import mock
import numpy
import chainer
from chainer import testing
class TestFunctionHook(unittest.TestCase):
def setUp(self):
self.h = chainer.FunctionHook()
def test_name(self):
self.assertEqual(self.h.name, 'FunctionHook')
def test_forward_preprocess(self):
self.assertTrue(hasattr(self.h, 'forward_preprocess'))
def test_forward_postprocess(self):
self.assertTrue(hasattr(self.h, 'forward_postprocess'))
def test_backward_preprocess(self):
self.assertTrue(hasattr(self.h, 'backward_preprocess'))
def test_backward_postprocess(self):
self.assertTrue(hasattr(self.h, 'backward_postprocess'))
def check_hook_methods_called(self, func):
def check_method_called(name):
with mock.patch.object(self.h, name) as patched:
with self.h:
func()
patched.assert_called()
check_method_called('forward_preprocess')
check_method_called('forward_postprocess')
check_method_called('backward_preprocess')
check_method_called('backward_postprocess')
def test_all_called_with_backward(self):
x = chainer.Variable(numpy.random.rand(2, 3).astype(numpy.float32))
y = chainer.functions.sum(x * x)
self.check_hook_methods_called(y.backward)
def test_all_called_with_grad(self):
x = chainer.Variable(numpy.random.rand(2, 3).astype(numpy.float32))
y = chainer.functions.sum(x * x)
self.check_hook_methods_called(lambda: chainer.grad([y], [x]))
testing.run_module(__name__, __file__)
| 1,627
| 29.148148
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_function_and_function_node.py
|
import unittest
import numpy
import chainer
from chainer import testing
import chainer.testing.backend
import chainerx
def _get_expected_xp(backend_config, is_function):
# Returns a pair of xp's expected in forward() and backward() respectively.
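    # For instance, on a ChainerX 'cuda:0' device this gives (cupy, cupy) for
    # chainer.Function and (cupy, chainerx) for chainer.FunctionNode, since
    # the fallback device of a ChainerX CUDA device is a cupy device.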
xp = backend_config.xp
if xp is chainerx:
forward_xp = backend_config.device.fallback_device.xp
else:
forward_xp = xp
if is_function:
# chainer.Function
backward_xp = forward_xp
else:
# chainer.FunctionNode
backward_xp = xp
return forward_xp, backward_xp
@testing.parameterize(*testing.product({
'function_node': [True, False],
}))
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestFunctionBackprop(unittest.TestCase):
def call_func_function(self, backend_config, x1, x2, x3):
forward_xp, backward_xp = _get_expected_xp(backend_config, True)
class Func(chainer.Function):
def __init__(self):
self.array_init = backend_config.device.send(
numpy.array([3], numpy.float32))
def forward(self, inputs):
# Inputs
assert isinstance(inputs, tuple)
# x1, x3: float32
# x2: int32
x1, x2, x3 = inputs
assert isinstance(x1, forward_xp.ndarray)
assert isinstance(x2, forward_xp.ndarray)
assert isinstance(x3, forward_xp.ndarray)
# attribute fallback
assert isinstance(self.array_init, forward_xp.ndarray)
self.array_forward = forward_xp.array([2], numpy.float32)
assert isinstance(self.array_forward, forward_xp.ndarray)
y1 = x2 - 1 # int32
y2 = x1 * x3 + x2.astype(x1.dtype)
y3 = x1 + x3
self.retain_inputs((0, 2))
self.retain_outputs((0, 1,))
return y1, y2, y3
def backward(self, inputs, grad_outputs):
# Retained inputs
assert isinstance(inputs, tuple)
x1, x2, x3 = inputs
assert isinstance(x1, backward_xp.ndarray)
assert x2 is None # not retained
assert isinstance(x3, backward_xp.ndarray)
# Output gradients
assert isinstance(grad_outputs, tuple)
gy1, gy2, gy3 = grad_outputs
assert gy1 is None # y1 is int32
# y3 is disconnected
# TODO(niboshi): Expression after "or" is workaround for
# chainerx. ChainerX backward should return None for
# disconnected output and this workaround should be removed.
assert (gy3 is None
or (float(gy3.max()) == 0
and float((-gy3).max()) == 0))
# Retained outputs
output_data = self.output_data
assert isinstance(output_data, tuple)
y1, y2, y3 = output_data
assert isinstance(y1, backward_xp.ndarray)
assert isinstance(y2, backward_xp.ndarray)
assert y3 is None
# attribute fallback
assert isinstance(self.array_init, backward_xp.ndarray)
assert isinstance(self.array_forward, backward_xp.ndarray)
self.array_backward = backward_xp.array([4], numpy.float32)
assert isinstance(self.array_backward, backward_xp.ndarray)
gx1 = x3 * gy2 # + gy3
gx2 = None
gx3 = x1 * gy2 # + gy3
return gx1, gx2, gx3
return Func()(x1, x2, x3)
def call_func_function_node(self, backend_config, x1, x2, x3):
forward_xp, backward_xp = _get_expected_xp(backend_config, False)
class Func(chainer.FunctionNode):
def __init__(self):
self.array_init = backend_config.device.send(
numpy.array([3], numpy.float32))
def forward(self, inputs):
# Inputs
# x1, x3: float32
# x2: int32
x1, x2, x3 = inputs
assert isinstance(x1, forward_xp.ndarray)
assert isinstance(x2, forward_xp.ndarray)
assert isinstance(x3, forward_xp.ndarray)
# attribute fallback
assert isinstance(self.array_init, forward_xp.ndarray)
self.array_forward = forward_xp.array([2], numpy.float32)
assert isinstance(self.array_forward, forward_xp.ndarray)
y1 = x2 - 1 # int32
y2 = x1 * x3 + x2.astype(x1.dtype)
y3 = x1 + x3
self.retain_inputs((0, 2))
self.retain_outputs((0, 1,))
return y1, y2, y3
def backward(self, input_indexes, grad_outputs):
# Input indexes
assert isinstance(input_indexes, tuple)
assert input_indexes == (0, 2)
# Retained inputs
retained_inputs = self.get_retained_inputs()
assert isinstance(retained_inputs, tuple)
x1, x3 = retained_inputs
assert isinstance(x1.array, backward_xp.ndarray)
assert isinstance(x3.array, backward_xp.ndarray)
# Output gradients
assert isinstance(grad_outputs, tuple)
gy1, gy2, gy3 = grad_outputs
assert gy1 is None # y1 is int32
assert isinstance(gy2.array, backward_xp.ndarray)
# y3 is disconnected
# TODO(niboshi): Expression after "or" is workaround for
# chainerx. ChainerX backward should return None for
# disconnected output and this workaround should be removed.
assert (gy3 is None
or (float(gy3.array.max()) == 0
and float((-gy3.array).max()) == 0))
# Retained outputs
retained_outputs = self.get_retained_outputs()
assert isinstance(retained_outputs, tuple)
y1, y2, = retained_outputs
assert isinstance(y1.array, backward_xp.ndarray)
assert isinstance(y2.array, backward_xp.ndarray)
# attribute fallback
assert isinstance(self.array_init, backward_xp.ndarray)
assert isinstance(self.array_forward, backward_xp.ndarray)
self.array_backward = backward_xp.array([4], numpy.float32)
assert isinstance(self.array_backward, backward_xp.ndarray)
gx1 = x3 * gy2 # + gy3
gx2 = None
gx3 = x1 * gy2 # + gy3
return gx1, gx2, gx3
return Func().apply((x1, x2, x3))
def call_func(self, backend_config, x1, x2, x3):
if self.function_node:
return self.call_func_function_node(backend_config, x1, x2, x3)
else:
return self.call_func_function(backend_config, x1, x2, x3)
def test_backprop(self, backend_config):
x1_arr = numpy.array([2, 3], numpy.float32)
x2_arr = numpy.array([3, 1], numpy.int32)
x3_arr = numpy.array([5, 2], numpy.float32)
gy2_arr = numpy.array([2, 4], numpy.float32)
x1_arr, x2_arr, x3_arr, gy2_arr = backend_config.get_array(
(x1_arr, x2_arr, x3_arr, gy2_arr))
x1 = chainer.Variable(x1_arr)
x2 = chainer.Variable(x2_arr, requires_grad=False)
x3 = chainer.Variable(x3_arr)
# Forward
y1, y2, y3 = self.call_func(backend_config, x1, x2, x3)
assert isinstance(y1.array, backend_config.xp.ndarray)
assert isinstance(y2.array, backend_config.xp.ndarray)
assert isinstance(y3.array, backend_config.xp.ndarray)
# Backward
y2.grad = gy2_arr
y2.backward()
assert isinstance(x1.grad, backend_config.xp.ndarray)
assert x2.grad is None
assert isinstance(x3.grad, backend_config.xp.ndarray)
@testing.parameterize(*testing.product({
'function_node': [True, False],
}))
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestFunctionInputNone(unittest.TestCase):
def call_func_function(self, backend_config, x2):
forward_xp, backward_xp = _get_expected_xp(backend_config, True)
class Func(chainer.Function):
def forward(self, inputs):
# Inputs
assert isinstance(inputs, tuple)
x1, x2, x3 = inputs
assert x1 is None
assert isinstance(x2, forward_xp.ndarray)
assert x3 is None
y1 = x2 * 3
self.retain_inputs((1, 2))
self.retain_outputs(())
return y1,
def backward(self, inputs, grad_outputs):
# Retained inputs
assert isinstance(inputs, tuple)
x1, x2, x3 = inputs
assert x1 is None
assert isinstance(x2, backward_xp.ndarray)
assert x3 is None
# Output gradients
assert isinstance(grad_outputs, tuple)
gy1, = grad_outputs
assert isinstance(gy1, backward_xp.ndarray)
# Retained outputs
output_data = self.output_data
assert isinstance(output_data, tuple)
y1, = output_data
assert y1 is None
gx2 = 3 * gy1
return None, gx2, None
return Func()(None, x2, None),
def call_func_function_node(self, backend_config, x2):
forward_xp, backward_xp = _get_expected_xp(backend_config, False)
class Func(chainer.FunctionNode):
def forward(self, inputs):
# Inputs
x1, x2, x3 = inputs
assert x1 is None
assert isinstance(x2, forward_xp.ndarray)
assert x3 is None
y1 = x2 * 3
self.retain_inputs((1, 2))
self.retain_outputs(())
return y1,
def backward(self, input_indexes, grad_outputs):
# Input indexes
assert isinstance(input_indexes, tuple)
assert input_indexes == (1,)
# Retained inputs
retained_inputs = self.get_retained_inputs()
assert isinstance(retained_inputs, tuple)
x2, x3 = retained_inputs
assert isinstance(x2.array, backward_xp.ndarray)
assert x3 is None
# Output grads
assert isinstance(grad_outputs, tuple)
gy1, = grad_outputs
assert isinstance(gy1.array, backward_xp.ndarray)
# Retained outputs
retained_outputs = self.get_retained_outputs()
                assert retained_outputs == ()
gx2 = 3 * gy1
return None, gx2, None
return Func().apply((None, x2, None))
def call_func(self, backend_config, x1):
if self.function_node:
return self.call_func_function_node(backend_config, x1)
else:
return self.call_func_function(backend_config, x1)
def test_backprop(self, backend_config):
x2_arr = numpy.array([2, 3], numpy.float32)
gy1_arr = numpy.array([2, 4], numpy.float32)
x2_arr, gy1_arr = backend_config.get_array((x2_arr, gy1_arr))
x2 = chainer.Variable(x2_arr, requires_grad=True)
# Forward
y1, = self.call_func(backend_config, x2)
assert isinstance(y1.array, backend_config.xp.ndarray)
# Backward
y1.grad = gy1_arr
y1.backward()
assert isinstance(x2.grad, backend_config.xp.ndarray)
@testing.parameterize(*testing.product({
'function_node': [True, False],
}))
@testing.backend.inject_backend_tests(
None,
[
# CPU
{},
# CUDA
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestFunctionOutputNone(unittest.TestCase):
def call_func_function(self, backend_config, x1):
forward_xp, backward_xp = _get_expected_xp(backend_config, True)
class Func(chainer.Function):
def forward(self, inputs):
# Inputs
assert isinstance(inputs, tuple)
x1, = inputs
assert isinstance(x1, forward_xp.ndarray)
y2 = x1 * 3 + 2
self.retain_inputs(())
self.retain_outputs((1, 2,))
return None, y2, None
def backward(self, inputs, grad_outputs):
# Retained inputs
assert isinstance(inputs, tuple)
x1, = inputs
assert x1 is None
# Output gradients
assert isinstance(grad_outputs, tuple)
gy1, gy2, gy3 = grad_outputs
assert gy1 is None
assert isinstance(gy2, backward_xp.ndarray)
assert gy3 is None
# Retained outputs
output_data = self.output_data
assert isinstance(output_data, tuple)
assert len(output_data) == 3
y1, y2, y3 = output_data
assert y1 is None
assert isinstance(y2, backward_xp.ndarray)
assert y3 is None
gx1 = 3 * gy2
return gx1,
return Func()(x1)
def call_func_function_node(self, backend_config, x1):
forward_xp, backward_xp = _get_expected_xp(backend_config, False)
class Func(chainer.FunctionNode):
def forward(self, inputs):
# Inputs
x1, = inputs
assert isinstance(x1, forward_xp.ndarray)
y2 = x1 * 3 + 2
self.retain_outputs((1, 2))
return None, y2, None
def backward(self, input_indexes, grad_outputs):
# Input indexes
assert isinstance(input_indexes, tuple)
assert input_indexes == (0,)
# Retained inputs
retained_inputs = self.get_retained_inputs()
assert isinstance(retained_inputs, tuple)
assert retained_inputs == ()
# Output grads
assert isinstance(grad_outputs, tuple)
gy1, gy2, gy3 = grad_outputs
assert gy1 is None
assert isinstance(gy2.array, backward_xp.ndarray)
assert gy3 is None
# Retained outputs
retained_outputs = self.get_retained_outputs()
assert isinstance(retained_outputs, tuple)
y2, y3 = retained_outputs
assert y3 is None
assert isinstance(y2.array, backward_xp.ndarray)
gx1 = 3 * gy2
return gx1,
return Func().apply((x1,))
def call_func(self, backend_config, x1):
if self.function_node:
return self.call_func_function_node(backend_config, x1)
else:
return self.call_func_function(backend_config, x1)
def test_backprop(self, backend_config):
x1_arr = numpy.array([2, 3], numpy.float32)
gy2_arr = numpy.array([2, 4], numpy.float32)
x1_arr, gy2_arr = backend_config.get_array((x1_arr, gy2_arr))
x1 = chainer.Variable(x1_arr, requires_grad=True)
# Forward
y1, y2, y3 = self.call_func(backend_config, x1)
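        # y1 and y3 are placeholder outputs whose data is None; only y2 carries an array.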
assert y1.array is None
assert isinstance(y2.array, backend_config.xp.ndarray)
assert y3.array is None
# Backward
y2.grad = gy2_arr
y2.backward()
assert isinstance(x1.grad, backend_config.xp.ndarray)
testing.run_module(__name__, __file__)
| 17,057
| 33.321932
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/test_reporter.py
|
import contextlib
import tempfile
import threading
import time
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import configuration
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
class TestReporter(unittest.TestCase):
def test_empty_reporter(self):
reporter = chainer.Reporter()
self.assertEqual(reporter.observation, {})
def test_enter_exit(self):
reporter1 = chainer.Reporter()
reporter2 = chainer.Reporter()
with reporter1:
self.assertIs(chainer.get_current_reporter(), reporter1)
with reporter2:
self.assertIs(chainer.get_current_reporter(), reporter2)
self.assertIs(chainer.get_current_reporter(), reporter1)
def test_enter_exit_threadsafe(self):
# This test ensures reporter.__enter__ correctly stores the reporter
# in the thread-local storage.
def thread_func(reporter, record):
with reporter:
# Sleep for a tiny moment to cause an overlap of the context
# managers.
time.sleep(0.01)
record.append(chainer.get_current_reporter())
        record1 = []  # The current reporter in each thread is stored here.
record2 = []
reporter1 = chainer.Reporter()
reporter2 = chainer.Reporter()
thread1 = threading.Thread(
target=thread_func,
args=(reporter1, record1))
thread2 = threading.Thread(
target=thread_func,
args=(reporter2, record2))
thread1.daemon = True
thread2.daemon = True
thread1.start()
thread2.start()
thread1.join()
thread2.join()
self.assertIs(record1[0], reporter1)
self.assertIs(record2[0], reporter2)
def test_scope(self):
reporter1 = chainer.Reporter()
reporter2 = chainer.Reporter()
with reporter1:
observation = {}
with reporter2.scope(observation):
self.assertIs(chainer.get_current_reporter(), reporter2)
self.assertIs(reporter2.observation, observation)
self.assertIs(chainer.get_current_reporter(), reporter1)
self.assertIsNot(reporter2.observation, observation)
def test_add_observer(self):
reporter = chainer.Reporter()
observer = object()
reporter.add_observer('o', observer)
reporter.report({'x': 1}, observer)
observation = reporter.observation
self.assertIn('o/x', observation)
self.assertEqual(observation['o/x'], 1)
self.assertNotIn('x', observation)
def test_add_observers(self):
reporter = chainer.Reporter()
observer1 = object()
reporter.add_observer('o1', observer1)
observer2 = object()
reporter.add_observer('o2', observer2)
reporter.report({'x': 1}, observer1)
reporter.report({'y': 2}, observer2)
observation = reporter.observation
self.assertIn('o1/x', observation)
self.assertEqual(observation['o1/x'], 1)
self.assertIn('o2/y', observation)
self.assertEqual(observation['o2/y'], 2)
self.assertNotIn('x', observation)
self.assertNotIn('y', observation)
self.assertNotIn('o1/y', observation)
self.assertNotIn('o2/x', observation)
def test_report_without_observer(self):
reporter = chainer.Reporter()
reporter.report({'x': 1})
observation = reporter.observation
self.assertIn('x', observation)
self.assertEqual(observation['x'], 1)
class TestKeepGraphOnReportFlag(unittest.TestCase):
@contextlib.contextmanager
def _scope(self, flag):
# If flag is None, return the nop context.
# Otherwise, return the context in which
# keep_graph_on_report is set temporarily.
old = configuration.config.keep_graph_on_report
if flag is not None:
configuration.config.keep_graph_on_report = flag
try:
yield
finally:
configuration.config.keep_graph_on_report = old
def test_keep_graph_default(self):
x = chainer.Variable(numpy.array([1], numpy.float32))
y = functions.sigmoid(x)
reporter = chainer.Reporter()
with self._scope(None):
reporter.report({'y': y})
self.assertIsNone(reporter.observation['y'].creator)
def test_keep_graph(self):
x = chainer.Variable(numpy.array([1], numpy.float32))
y = functions.sigmoid(x)
reporter = chainer.Reporter()
with self._scope(True):
reporter.report({'y': y})
assert reporter.observation['y'].creator is not None
def test_not_keep_graph(self):
x = chainer.Variable(numpy.array([1], numpy.float32))
y = functions.sigmoid(x)
reporter = chainer.Reporter()
with self._scope(False):
reporter.report({'y': y})
self.assertIsNone(reporter.observation['y'].creator)
class TestReport(unittest.TestCase):
def test_report_without_reporter(self):
observer = object()
chainer.report({'x': 1}, observer)
def test_report(self):
reporter = chainer.Reporter()
with reporter:
chainer.report({'x': 1})
observation = reporter.observation
self.assertIn('x', observation)
self.assertEqual(observation['x'], 1)
def test_report_with_observer(self):
reporter = chainer.Reporter()
observer = object()
reporter.add_observer('o', observer)
with reporter:
chainer.report({'x': 1}, observer)
observation = reporter.observation
self.assertIn('o/x', observation)
self.assertEqual(observation['o/x'], 1)
def test_report_with_unregistered_observer(self):
reporter = chainer.Reporter()
observer = object()
with reporter:
with self.assertRaises(KeyError):
chainer.report({'x': 1}, observer)
def test_report_scope(self):
reporter = chainer.Reporter()
observation = {}
with reporter:
with chainer.report_scope(observation):
chainer.report({'x': 1})
self.assertIn('x', observation)
self.assertEqual(observation['x'], 1)
self.assertNotIn('x', reporter.observation)
@backend.inject_backend_tests(
['test_basic', 'test_serialize_array_float', 'test_serialize_array_int'],
[{}, {'use_cuda': True}])
class TestSummary(unittest.TestCase):
def setUp(self):
self.summary = chainer.reporter.Summary()
def test_basic(self, backend_config):
self.summary.add(backend_config.get_array(numpy.array(1, 'f')))
self.summary.add(backend_config.get_array(numpy.array(-2, 'f')))
mean = self.summary.compute_mean()
testing.assert_allclose(mean, numpy.array(-0.5, 'f'))
mean, std = self.summary.make_statistics()
testing.assert_allclose(mean, numpy.array(-0.5, 'f'))
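        # Population std of {1, -2}: sqrt(5/2 - 0.25) = 1.5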
testing.assert_allclose(std, numpy.array(1.5, 'f'))
def test_int(self):
self.summary.add(1)
self.summary.add(2)
self.summary.add(3)
mean = self.summary.compute_mean()
testing.assert_allclose(mean, 2)
mean, std = self.summary.make_statistics()
testing.assert_allclose(mean, 2)
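        # Population std of {1, 2, 3}: sqrt(14/3 - 4) = sqrt(2/3)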
testing.assert_allclose(std, numpy.sqrt(2. / 3.))
def test_float(self):
self.summary.add(1.)
self.summary.add(2.)
self.summary.add(3.)
mean = self.summary.compute_mean()
testing.assert_allclose(mean, 2.)
mean, std = self.summary.make_statistics()
testing.assert_allclose(mean, 2.)
testing.assert_allclose(std, numpy.sqrt(2. / 3.))
def test_weight(self):
self.summary.add(1., 0.5)
self.summary.add(2., numpy.array(0.4))
self.summary.add(3., chainer.Variable(numpy.array(0.3)))
mean = self.summary.compute_mean().array
val = (1 * 0.5 + 2 * 0.4 + 3 * 0.3) / (0.5 + 0.4 + 0.3)
testing.assert_allclose(mean, val)
def check_serialize(self, value1, value2, value3):
xp = chainer.backend.get_array_module(value1, value2, value3)
self.summary.add(value1)
self.summary.add(value2)
summary = chainer.reporter.Summary()
testing.save_and_load_npz(self.summary, summary)
summary.add(value3)
expected_mean = (value1 + value2 + value3) / 3.
expected_std = xp.sqrt(
(value1**2 + value2**2 + value3**2) / 3. - expected_mean**2)
mean = summary.compute_mean()
testing.assert_allclose(mean, expected_mean)
mean, std = summary.make_statistics()
testing.assert_allclose(mean, expected_mean)
testing.assert_allclose(std, expected_std)
def test_serialize_array_float(self, backend_config):
self.check_serialize(
backend_config.get_array(numpy.array(1.5, numpy.float32)),
backend_config.get_array(numpy.array(2.0, numpy.float32)),
# sum of the above two is non-integer
backend_config.get_array(numpy.array(3.5, numpy.float32)))
def test_serialize_array_int(self, backend_config):
self.check_serialize(
backend_config.get_array(numpy.array(1, numpy.int32)),
backend_config.get_array(numpy.array(-2, numpy.int32)),
backend_config.get_array(numpy.array(2, numpy.int32)))
def test_serialize_scalar_float(self):
self.check_serialize(
1.5, 2.0,
# sum of the above two is non-integer
3.5)
def test_serialize_scalar_int(self):
self.check_serialize(1, -2, 2)
def test_serialize_backward_compat(self):
with tempfile.NamedTemporaryFile(delete=False) as f:
# old version does not save anything
numpy.savez(f, dummy=0)
with testing.assert_warns(UserWarning):
chainer.serializers.load_npz(f.name, self.summary)
self.summary.add(2.)
self.summary.add(3.)
mean = self.summary.compute_mean()
testing.assert_allclose(mean, 2.5)
mean, std = self.summary.make_statistics()
testing.assert_allclose(mean, 2.5)
testing.assert_allclose(std, 0.5)
class TestDictSummary(unittest.TestCase):
def setUp(self):
self.summary = chainer.reporter.DictSummary()
def check(self, summary, data):
mean = summary.compute_mean()
self.assertEqual(set(mean.keys()), set(data.keys()))
for name in data.keys():
m = sum(data[name]) / float(len(data[name]))
testing.assert_allclose(mean[name], m)
stats = summary.make_statistics()
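        # make_statistics reports the mean under '<name>' and the standard
        # deviation under '<name>.std'.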
self.assertEqual(
set(stats.keys()),
set(data.keys()).union(name + '.std' for name in data.keys()))
for name in data.keys():
m = sum(data[name]) / float(len(data[name]))
s = numpy.sqrt(
sum(x * x for x in data[name]) / float(len(data[name]))
- m * m)
testing.assert_allclose(stats[name], m)
testing.assert_allclose(stats[name + '.std'], s)
def test(self):
self.summary.add({'numpy': numpy.array(3, 'f'), 'int': 1, 'float': 4.})
self.summary.add({'numpy': numpy.array(1, 'f'), 'int': 5, 'float': 9.})
self.summary.add({'numpy': numpy.array(2, 'f'), 'int': 6, 'float': 5.})
self.summary.add({'numpy': numpy.array(3, 'f'), 'int': 5, 'float': 8.})
self.check(self.summary, {
'numpy': (3., 1., 2., 3.),
'int': (1, 5, 6, 5),
'float': (4., 9., 5., 8.),
})
@attr.gpu
def test_cupy(self):
xp = cuda.cupy
self.summary.add({'cupy': xp.array(3, 'f')})
self.summary.add({'cupy': xp.array(1, 'f')})
self.summary.add({'cupy': xp.array(2, 'f')})
self.summary.add({'cupy': xp.array(3, 'f')})
self.check(self.summary, {'cupy': (3., 1., 2., 3.)})
def test_sparse(self):
self.summary.add({'a': 3., 'b': 1.})
self.summary.add({'a': 1., 'b': 5., 'c': 9.})
self.summary.add({'b': 6.})
self.summary.add({'a': 3., 'b': 5., 'c': 8.})
self.check(self.summary, {
'a': (3., 1., 3.),
'b': (1., 5., 6., 5.),
'c': (9., 8.),
})
def test_weight(self):
self.summary.add({'a': (1., 0.5)})
self.summary.add({'a': (2., numpy.array(0.4))})
self.summary.add({'a': (3., chainer.Variable(numpy.array(0.3)))})
mean = self.summary.compute_mean()
val = (1 * 0.5 + 2 * 0.4 + 3 * 0.3) / (0.5 + 0.4 + 0.3)
testing.assert_allclose(mean['a'], val)
with self.assertRaises(ValueError):
self.summary.add({'a': (4., numpy.array([0.5]))})
with self.assertRaises(ValueError):
self.summary.add({'a': (4., chainer.Variable(numpy.array([0.5])))})
def test_serialize(self):
self.summary.add({'numpy': numpy.array(3, 'f'), 'int': 1, 'float': 4.})
self.summary.add({'numpy': numpy.array(1, 'f'), 'int': 5, 'float': 9.})
self.summary.add({'numpy': numpy.array(2, 'f'), 'int': 6, 'float': 5.})
summary = chainer.reporter.DictSummary()
testing.save_and_load_npz(self.summary, summary)
summary.add({'numpy': numpy.array(3, 'f'), 'int': 5, 'float': 8.})
self.check(summary, {
'numpy': (3., 1., 2., 3.),
'int': (1, 5, 6, 5),
'float': (4., 9., 5., 8.),
})
@attr.gpu
def test_serialize_cupy(self):
xp = cuda.cupy
self.summary.add({'cupy': xp.array(3, 'f')})
self.summary.add({'cupy': xp.array(1, 'f')})
self.summary.add({'cupy': xp.array(2, 'f')})
summary = chainer.reporter.DictSummary()
testing.save_and_load_npz(self.summary, summary)
summary.add({'cupy': xp.array(3, 'f')})
self.check(summary, {'cupy': (3., 1., 2., 3.)})
def test_serialize_names_with_slash(self):
self.summary.add({'a/b': 3., '/a/b': 1., 'a/b/': 4.})
self.summary.add({'a/b': 1., '/a/b': 5., 'a/b/': 9.})
self.summary.add({'a/b': 2., '/a/b': 6., 'a/b/': 5.})
summary = chainer.reporter.DictSummary()
testing.save_and_load_npz(self.summary, summary)
summary.add({'a/b': 3., '/a/b': 5., 'a/b/': 8.})
self.check(summary, {
'a/b': (3., 1., 2., 3.),
'/a/b': (1., 5., 6., 5.),
'a/b/': (4., 9., 5., 8.),
})
def test_serialize_overwrite_different_names(self):
self.summary.add({'a': 3., 'b': 1.})
self.summary.add({'a': 1., 'b': 5.})
summary = chainer.reporter.DictSummary()
summary.add({'c': 5.})
testing.save_and_load_npz(self.summary, summary)
self.check(summary, {
'a': (3., 1.),
'b': (1., 5.),
})
def test_serialize_overwrite_rollback(self):
self.summary.add({'a': 3., 'b': 1.})
self.summary.add({'a': 1., 'b': 5.})
with tempfile.NamedTemporaryFile(delete=False) as f:
chainer.serializers.save_npz(f.name, self.summary)
self.summary.add({'a': 2., 'b': 6., 'c': 5.})
self.summary.add({'a': 3., 'b': 4., 'c': 6.})
chainer.serializers.load_npz(f.name, self.summary)
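        # Loading the snapshot rolls the summary back, discarding the entries
        # added after the save.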
self.summary.add({'a': 3., 'b': 5., 'c': 8.})
self.check(self.summary, {
'a': (3., 1., 3.),
'b': (1., 5., 5.),
'c': (8.,),
})
def test_serialize_backward_compat(self):
with tempfile.NamedTemporaryFile(delete=False) as f:
# old version does not save anything
numpy.savez(f, dummy=0)
with testing.assert_warns(UserWarning):
chainer.serializers.load_npz(f.name, self.summary)
def test_serialize_backward_compat_overwrite(self):
self.summary.add({'a': 3., 'b': 1., 'c': 4.})
self.summary.add({'a': 1., 'b': 5., 'c': 9.})
with tempfile.NamedTemporaryFile(delete=False) as f:
# old version does not save anything
numpy.savez(f, dummy=0)
with testing.assert_warns(UserWarning):
chainer.serializers.load_npz(f.name, self.summary)
self.summary.add({'a': 9., 'b': 2.})
self.summary.add({'a': 6., 'b': 5.})
self.check(self.summary, {
'a': (9., 6.),
'b': (2., 5.),
})
testing.run_module(__name__, __file__)
| 16,729
| 33.494845
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_configuration.py
|
import io
import threading
import unittest
import chainer
from chainer import configuration
from chainer import testing
class TestLocalConfig(unittest.TestCase):
def setUp(self):
self.global_config = configuration.GlobalConfig()
self.config = configuration.LocalConfig(self.global_config)
self.global_config.x = 'global x'
self.global_config.y = 'global y'
self.config.y = 'local y'
self.config.z = 'local z'
def test_attr(self):
self.assertTrue(hasattr(self.config, 'x'))
self.assertEqual(self.config.x, 'global x')
self.assertTrue(hasattr(self.config, 'y'))
self.assertEqual(self.config.y, 'local y')
self.assertTrue(hasattr(self.config, 'z'))
self.assertEqual(self.config.z, 'local z')
self.assertFalse(hasattr(self.config, 'w'))
del self.config.y
self.assertTrue(hasattr(self.config, 'y'))
self.assertEqual(self.config.y, 'global y')
with self.assertRaises(AttributeError):
del self.config.x
def test_multi_thread_attr(self):
def target():
self.config.y = 'local y2'
self.global_config.x = 'global x2'
self.global_config.z = 'global z2'
thread = threading.Thread(target=target)
thread.start()
thread.join()
self.assertEqual(self.config.y, 'local y')
self.assertEqual(self.config.x, 'global x2')
self.assertEqual(self.config.z, 'local z')
self.assertEqual(self.global_config.z, 'global z2')
def test_using_config_local_did_not_exist(self):
with chainer.using_config('x', 'temporary x', self.config):
self.assertEqual(self.config.x, 'temporary x')
self.assertEqual(self.global_config.x, 'global x')
self.assertEqual(self.config.x, 'global x')
self.global_config.x = 'global x2'
self.assertEqual(self.config.x, 'global x2')
def test_using_config_local_existed(self):
with chainer.using_config('y', 'temporary y', self.config):
self.assertEqual(self.config.y, 'temporary y')
self.assertEqual(self.global_config.y, 'global y')
self.assertEqual(self.config.y, 'local y')
def test_print_config(self):
self.config.abc = 1
sio = io.StringIO()
self.config.show(sio)
contents = sio.getvalue()
self.assertEqual(
contents, 'abc 1\nx global x\ny local y\nz local z\n')
def test_print_global_config(self):
self.global_config.abc = 1
sio = io.StringIO()
self.global_config.show(sio)
contents = sio.getvalue()
self.assertEqual(contents, 'abc 1\nx global x\ny global y\n')
testing.run_module(__name__, __file__)
| 2,789
| 32.614458
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_initializer.py
|
import unittest
from chainer import initializer
from chainer import testing
@testing.parameterize(
{'shape': (2, 1), 'expect': (1, 2)},
{'shape': (2, 3, 4), 'expect': (12, 8)},
{'shape': (2, 3, 4, 5), 'expect': (60, 40)})
class TestGetFans(unittest.TestCase):
def test_get_fans(self):
actual = initializer.get_fans(self.shape)
self.assertTupleEqual(self.expect, actual)
@testing.parameterize(
{'shape': ()},
{'shape': (2,)})
class TestGetFansInvalid(unittest.TestCase):
def test_invalid(self):
with self.assertRaises(ValueError):
initializer.get_fans(self.shape)
testing.run_module(__name__, __file__)
| 675
| 22.310345
| 50
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_link_hook.py
|
import time
import unittest
import numpy
import chainer
from chainer import testing
try:
_process_time = time.process_time
except AttributeError:
_process_time = time.clock
class MyLinkHook(chainer.LinkHook):
name = 'MyLinkHook'
def __init__(self):
self.added_args = []
self.deleted_args = []
self.forward_preprocess_args = []
self.forward_postprocess_args = []
def added(self, link):
assert link is None or isinstance(link, chainer.Link)
self.added_args.append((_process_time(), link))
def deleted(self, link):
assert link is None or isinstance(link, chainer.Link)
self.deleted_args.append((_process_time(), link))
def forward_preprocess(self, args):
assert isinstance(args.link, chainer.Link)
assert isinstance(args.forward_name, str)
assert isinstance(args.args, tuple)
assert isinstance(args.kwargs, dict)
assert isinstance(str(args), str)
assert isinstance(repr(args), str)
self.forward_preprocess_args.append((_process_time(), args))
def forward_postprocess(self, args):
assert isinstance(args.link, chainer.Link)
assert isinstance(args.forward_name, str)
assert isinstance(args.args, tuple)
assert isinstance(args.kwargs, dict)
assert hasattr(args, 'out')
assert isinstance(str(args), str)
assert isinstance(repr(args), str)
self.forward_postprocess_args.append((_process_time(), args))
class MyModel(chainer.Chain):
def __init__(self, w):
super(MyModel, self).__init__()
with self.init_scope():
self.l1 = chainer.links.Linear(3, 2, initialW=w)
def forward(self, x, test1, test2):
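        # test1 and test2 are unused by the computation; they only exercise
        # forwarding of positional and keyword arguments to link hooks.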
return self.l1(x)
class TestLinkHook(unittest.TestCase):
def _create_model_and_data(self):
x = numpy.array([[3, 1, 2]], numpy.float32)
w = numpy.array([[1, 3, 2], [6, 4, 5]], numpy.float32)
dot = numpy.dot(x, w.T)
model = MyModel(w)
return model, x, dot
def test_name(self):
        assert chainer.LinkHook().name == 'LinkHook'
def test_global_hook(self):
model, x, dot = self._create_model_and_data()
hook = MyLinkHook()
with hook:
model(chainer.Variable(x), 'foo', test2='bar')
# added
assert len(hook.added_args) == 1
assert hook.added_args[0][1] is None
# deleted
        assert len(hook.deleted_args) == 1
assert hook.deleted_args[0][1] is None
# forward_preprocess
assert len(hook.forward_preprocess_args) == 2
# - MyModel
args = hook.forward_preprocess_args[0][1]
assert args.link is model
assert args.forward_name == 'forward'
assert len(args.args) == 2
numpy.testing.assert_array_equal(args.args[0].data, x)
assert args.args[1] == 'foo'
assert len(args.kwargs) == 1
assert args.kwargs['test2'] == 'bar'
# - Linear
args = hook.forward_preprocess_args[1][1]
assert args.link is model.l1
assert args.forward_name == 'forward'
# forward_postprocess
assert len(hook.forward_postprocess_args) == 2
# - Linear
args = hook.forward_postprocess_args[0][1]
assert args.link is model.l1
assert args.forward_name == 'forward'
# - MyModel
args = hook.forward_postprocess_args[1][1]
assert args.link is model
assert args.forward_name == 'forward'
assert len(args.args) == 2
numpy.testing.assert_array_equal(args.args[0].data, x)
assert args.args[1] == 'foo'
assert len(args.kwargs) == 1
assert args.kwargs['test2'] == 'bar'
numpy.testing.assert_array_equal(args.out.data, dot)
# Test callback call order
time_sequence = [
hook.added_args[0][0],
hook.forward_preprocess_args[0][0],
hook.forward_preprocess_args[1][0],
hook.forward_postprocess_args[0][0],
hook.forward_postprocess_args[1][0],
hook.deleted_args[0][0]]
assert sorted(time_sequence) == time_sequence
def _check_local_hook(self, add_hook_name, delete_hook_name):
model, x, dot = self._create_model_and_data()
hook = MyLinkHook()
model.add_hook(hook, add_hook_name)
model(chainer.Variable(x), 'foo', test2='bar')
model.delete_hook(delete_hook_name)
# added
assert len(hook.added_args) == 1
assert hook.added_args[0][1] is model
# deleted
        assert len(hook.deleted_args) == 1
assert hook.deleted_args[0][1] is model
# forward_preprocess
assert len(hook.forward_preprocess_args) == 1
# - MyModel
args = hook.forward_preprocess_args[0][1]
assert args.link is model
assert args.forward_name == 'forward'
assert len(args.args) == 2
numpy.testing.assert_array_equal(args.args[0].data, x)
assert args.args[1] == 'foo'
assert len(args.kwargs) == 1
assert args.kwargs['test2'] == 'bar'
# forward_postprocess
assert len(hook.forward_postprocess_args) == 1
# - MyModel
args = hook.forward_postprocess_args[0][1]
assert args.link is model
assert args.forward_name == 'forward'
assert len(args.args) == 2
numpy.testing.assert_array_equal(args.args[0].data, x)
assert args.args[1] == 'foo'
assert len(args.kwargs) == 1
assert args.kwargs['test2'] == 'bar'
numpy.testing.assert_array_equal(args.out.data, dot)
def test_local_hook_named(self):
self._check_local_hook('myhook', 'myhook')
def test_local_hook_unnamed(self):
self._check_local_hook(None, 'MyLinkHook')
def test_addhook_returns_self(self):
model, x, dot = self._create_model_and_data()
hook = MyLinkHook()
ret = model.add_hook(hook)
assert ret is model
def test_global_hook_delete(self):
# Deleted hook should not be called
model, x, dot = self._create_model_and_data()
hook = MyLinkHook()
with hook:
pass
model(chainer.Variable(x), 'foo', test2='bar')
assert len(hook.added_args) == 1
assert len(hook.deleted_args) == 1
assert len(hook.forward_preprocess_args) == 0
assert len(hook.forward_postprocess_args) == 0
def test_local_hook_delete(self):
# Deleted hook should not be called
model, x, dot = self._create_model_and_data()
hook = MyLinkHook()
model.add_hook(hook)
model.delete_hook('MyLinkHook')
model(chainer.Variable(x), 'foo', test2='bar')
assert len(hook.added_args) == 1
assert len(hook.deleted_args) == 1
assert len(hook.forward_preprocess_args) == 0
assert len(hook.forward_postprocess_args) == 0
testing.run_module(__name__, __file__)
| 7,011
| 30.872727
| 69
|
py
|
chainer
|
chainer-master/tests/chainer_tests/test_backprop.py
|
import unittest
import mock
import numpy as np
import pytest
import chainer
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
import chainerx
class TestBackward(unittest.TestCase):
def test_no_output(self):
chainer.backward([])
chainer.backward([], [])
def check_multiple_output_1arg(self, xp, skip_retain_grad_test=False):
x = chainer.Variable(xp.array([1, 2], np.float32))
h = x * 2
y0 = h * 3
y1 = h * 4
y0.grad = xp.array([1, 10], np.float32)
y1.grad = xp.array([100, 1000], np.float32)
chainer.backward([y0, y1])
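        # x.grad = 6 * gy0 + 8 * gy1 since y0 = 6 * x and y1 = 8 * x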
testing.assert_allclose(x.grad, np.array([806, 8060], np.float32))
if skip_retain_grad_test:
return
assert y0.grad is None
assert y1.grad is None
def check_multiple_output_2args(self, xp, skip_retain_grad_test=False):
x = chainer.Variable(xp.array([1, 2], np.float32))
h = x * 2
y0 = h * 3
y1 = h * 4
gy0 = chainer.Variable(xp.array([1, 10], np.float32))
gy1 = chainer.Variable(xp.array([100, 1000], np.float32))
chainer.backward([y0, y1], [gy0, gy1])
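        # Same expected gradient as the 1-arg variant: 6 * gy0 + 8 * gy1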
testing.assert_allclose(x.grad, np.array([806, 8060], np.float32))
if skip_retain_grad_test:
return
assert y0.grad is None
assert y1.grad is None
def test_multiple_output_cpu(self):
self.check_multiple_output_1arg(np)
self.check_multiple_output_2args(np)
@attr.gpu
def test_multiple_output_gpu(self):
self.check_multiple_output_1arg(cuda.cupy)
self.check_multiple_output_2args(cuda.cupy)
@attr.chainerx
def test_multiple_output_chainerx_partially_ok(self):
self.check_multiple_output_1arg(
chainerx, skip_retain_grad_test=True)
self.check_multiple_output_2args(
chainerx, skip_retain_grad_test=True)
# TODO(kataoka): Variable.backward with ChainerX backend unexpectedly
# behaves like retain_grad=True
@pytest.mark.xfail(strict=True)
@attr.chainerx
def test_multiple_output_1arg_chainerx(self):
self.check_multiple_output_1arg(chainerx)
# TODO(kataoka): Variable.backward with ChainerX backend unexpectedly
# behaves like retain_grad=True
@pytest.mark.xfail(strict=True)
@attr.chainerx
def test_multiple_output_2args_chainerx(self):
self.check_multiple_output_2args(chainerx)
def test_multiple_output_call_count(self):
x = chainer.Variable(np.array([1, 2], np.float32))
f = chainer.FunctionNode()
f.forward = mock.MagicMock(
side_effect=lambda xs: tuple(x * 2 for x in xs))
f.backward = mock.MagicMock(
side_effect=lambda _, gys: tuple(gy * 2 for gy in gys))
h, = f.apply((x,))
y0 = h * 3
y1 = h * 4
y0.grad = np.array([1, 10], np.float32)
y1.grad = np.array([100, 1000], np.float32)
chainer.backward([y0, y1])
testing.assert_allclose(x.grad, np.array([806, 8060], np.float32))
assert f.backward.call_count == 1
def test_warn_no_grad(self):
x = chainer.Variable(np.array(4, np.float32))
x.grad = np.array(3, np.float32)
y = x * 2
with testing.assert_warns(RuntimeWarning):
chainer.backward([y])
testing.assert_allclose(x.grad, np.array(3, np.float32))
assert y.grad is None
def test_duplicate_outputs(self):
x = chainer.Variable(np.array(0, np.float32))
y = chainer.functions.identity(x)
y.grad = np.array(3, np.float32)
with testing.assert_warns(RuntimeWarning):
chainer.backward([y, y])
# 6 might be expected, but y.grad is used only once
testing.assert_allclose(x.grad, np.array(3, np.float32))
# see also test_function_node.TestGradTypeCheck
class TestBackwardTypeCheck(unittest.TestCase):
def _rand(self):
return np.random.uniform(-1, 1, (2, 3)).astype(np.float32)
def test_type_check(self):
x = chainer.Variable(self._rand())
y = x * x
y.grad = self._rand()
gy = chainer.Variable(self._rand())
with self.assertRaises(TypeError):
chainer.backward(y)
with self.assertRaises(TypeError):
chainer.backward([y], gy)
chainer.backward([y])
chainer.backward([y], [gy])
# see also test_function_node.TestGradValueCheck
class TestBackwardValueCheck(unittest.TestCase):
def test_length_check(self):
x = chainer.Variable(np.array(3, np.float32))
y = chainer.functions.identity(x)
gy = chainer.Variable(np.array(7, np.float32))
with self.assertRaises(ValueError):
chainer.backward([y], [])
with self.assertRaises(ValueError):
chainer.backward([y], [gy, gy])
with self.assertRaises(ValueError):
chainer.backward([], [gy])
with self.assertRaises(ValueError):
chainer.backward([y, y], [gy])
chainer.backward([y], [gy])
testing.run_module(__name__, __file__)
| 5,140
| 31.745223
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/backends_tests/test_chainerx.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
import chainerx
@testing.inject_backend_tests(
None,
[
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestChainerxDevice(unittest.TestCase):
def check_device(self, device, backend_config):
assert isinstance(device, backend.ChainerxDevice)
assert device.xp is chainerx
assert device.supported_array_types == (chainerx.ndarray,)
assert device.name == backend_config.chainerx_device
assert str(device) == backend_config.chainerx_device
assert isinstance(hash(device), int) # hashable
# fallback_device
chainerx_device_comps = backend_config.chainerx_device.split(':')
if chainerx_device_comps[0] == 'native':
assert isinstance(device.fallback_device, backend.CpuDevice)
elif chainerx_device_comps[0] == 'cuda':
assert isinstance(device.fallback_device, backend.GpuDevice)
assert (
device.fallback_device.device.id
== int(chainerx_device_comps[1]))
else:
# Currently no such ChainerX device is known in Chainer
assert False
def test_init(self, backend_config):
name = backend_config.chainerx_device
chx_device = chainerx.get_device(name)
device = backend.ChainerxDevice(chx_device)
self.check_device(device, backend_config)
assert device.device is chx_device
def test_from_array(self, backend_config):
arr = backend_config.get_array(numpy.ndarray((2,), numpy.float32))
# Test precondition check
assert arr.device.name == backend_config.chainerx_device
expected_device = backend_config.device
# ChainerxDevice.from_array
device = backend.ChainerxDevice.from_array(arr)
self.check_device(device, backend_config)
assert device == expected_device
# backend.get_device_from_array
device = backend.get_device_from_array(arr)
self.check_device(device, backend_config)
assert device == expected_device
def test_from_fallback_device(self, backend_config):
# Preparation: it depends on ChainerxDevice.fallback_device
tmp_device = backend.ChainerxDevice(
chainerx.get_device(backend_config.chainerx_device))
fallback_device = tmp_device.fallback_device
# Test
device = backend.ChainerxDevice.from_fallback_device(fallback_device)
self.check_device(device, backend_config)
assert device.fallback_device == fallback_device
@testing.inject_backend_tests(
None,
[
{},
{'use_cuda': True},
])
class TestChainerxDeviceFromArrayInvalidArray(unittest.TestCase):
def test_from_array(self, backend_config):
arr = backend_config.get_array(numpy.ndarray((2,), numpy.float32))
device = backend.ChainerxDevice.from_array(arr)
assert device is None
@testing.parameterize(*testing.product(
{
'value': [None, 1, ()],
}))
class TestChainerxDeviceFromArrayInvalidValue(unittest.TestCase):
def test_from_array(self):
device = backend.ChainerxDevice.from_array(self.value)
assert device is None
@testing.inject_backend_tests(
None,
[
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestChainerxDeviceUse(unittest.TestCase):
def test_use(self, backend_config):
device = chainer.get_device(backend_config.chainerx_device)
with chainerx.using_device('native:1'):
device.use()
assert device.device is chainerx.get_default_device()
@chainer.testing.inject_backend_tests(
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@attr.chainerx
class TestFromToChainerx(unittest.TestCase):
def check_equal_memory_shared(self, arr1, arr2):
# Check that the two arrays share the internal memory.
numpy.testing.assert_array_equal(
backend.CpuDevice().send(arr1), backend.CpuDevice().send(arr2))
with chainer.using_device(backend.get_device_from_array(arr1)):
arr1 += 2
numpy.testing.assert_array_equal(
backend.CpuDevice().send(arr1), backend.CpuDevice().send(arr2))
with chainer.using_device(backend.get_device_from_array(arr1)):
arr1 -= 2
def test_from_chx(self, backend_config):
arr = backend_config.get_array(numpy.ones((2, 3), numpy.float32))
arr_converted = backend.from_chx(arr)
src_device = backend_config.device
if src_device.xp is chainerx:
dst_xp = src_device.fallback_device.xp
assert isinstance(arr_converted, dst_xp.ndarray)
if dst_xp is cuda.cupy:
assert arr_converted.device.id == src_device.device.index
else:
assert arr is arr_converted
with backend_config:
self.check_equal_memory_shared(arr, arr_converted)
def test_to_chx(self, backend_config):
arr = backend_config.get_array(numpy.ones((2, 3), numpy.float32))
arr_converted = backend.to_chx(arr)
src_device = backend_config.device
assert isinstance(arr_converted, chainerx.ndarray)
if src_device.xp is chainerx:
assert arr is arr_converted
elif src_device.xp is cuda.cupy:
assert arr.device.id == arr_converted.device.index
self.check_equal_memory_shared(arr, arr_converted)
@chainer.testing.inject_backend_tests( # backend_config2
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@chainer.testing.inject_backend_tests( # backend_config1
None,
[
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestChainerxIsArraySupported(unittest.TestCase):
def test_is_array_supported(self, backend_config1, backend_config2):
target = backend_config1.device # backend.ChainerxDevice
arr = backend_config2.get_array(numpy.ndarray((2,), numpy.float32))
device = backend_config2.device
if (isinstance(device, backend.ChainerxDevice)
and device.device == target.device):
assert target.is_array_supported(arr)
else:
assert not target.is_array_supported(arr)
testing.run_module(__name__, __file__)
| 7,449
| 33.651163
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/backends_tests/test_intel64.py
|
import unittest
import numpy
from chainer import backend
from chainer.backends import intel64
from chainer import testing
@testing.attr.ideep
class TestIntel64Device(unittest.TestCase):
def check_device(self, device):
assert device.xp is numpy
assert device.supported_array_types == (numpy.ndarray, intel64.mdarray)
assert device.name == '@intel64'
assert str(device) == '@intel64'
assert isinstance(hash(device), int) # hashable
def test_init(self):
device = backend.Intel64Device()
self.check_device(device)
def test_from_array(self):
arr = intel64.ideep.array(numpy.ndarray((2,), numpy.float32))
# Test precondition check
assert isinstance(arr, intel64.mdarray)
expected_device = backend.Intel64Device()
device = backend.Intel64Device.from_array(arr)
self.check_device(device)
assert device == expected_device
device = backend.get_device_from_array(arr)
self.check_device(device)
assert device == expected_device
@testing.backend.inject_backend_tests(
None,
[
{},
{'use_cuda': True},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
])
class TestIntel64DeviceFromArrayInvalidArray(unittest.TestCase):
def test_from_array(self, backend_config):
arr = backend_config.get_array(numpy.ndarray((2,), numpy.float32))
device = backend.Intel64Device.from_array(arr)
assert device is None
@testing.parameterize(*testing.product(
{
'value': [None, 1, ()],
}))
class TestIntel64DeviceFromArrayInvalidValue(unittest.TestCase):
def test_from_array(self):
device = backend.Intel64Device.from_array(self.value)
assert device is None
@testing.backend.inject_backend_tests( # backend_config2
None,
[
{},
{'use_cuda': True},
{'use_ideep': 'always'},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
])
@testing.backend.inject_backend_tests( # backend_config1
None,
[
{'use_ideep': 'always'},
])
class TestIntel64DeviceIsArraySupported(unittest.TestCase):
def test_is_array_supported(self, backend_config1, backend_config2):
target = backend_config1.device # backend.Intel64Device
arr = backend_config2.get_array(numpy.ndarray((2,), numpy.float32))
device = backend_config2.device
if isinstance(device, (backend.CpuDevice, backend.Intel64Device)):
assert target.is_array_supported(arr)
else:
assert not target.is_array_supported(arr)
testing.run_module(__name__, __file__)
| 2,676
| 27.478723
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/backends_tests/test_cpu.py
|
import unittest
import numpy
from chainer import backend
from chainer import testing
class TestCpuDevice(unittest.TestCase):
def test_hashable(self):
assert isinstance(hash(backend.CpuDevice()), int)
class TestCpuDeviceFromArray(unittest.TestCase):
def check_device(self, device):
assert device.xp is numpy
assert device.supported_array_types == (numpy.ndarray,)
assert device.name == '@numpy'
assert str(device) == '@numpy'
def test_init(self):
device = backend.CpuDevice()
self.check_device(device)
def test_from_array(self):
arr = numpy.ndarray((2,), numpy.float32)
expected_device = backend.CpuDevice()
device = backend.CpuDevice.from_array(arr)
self.check_device(device)
assert device == expected_device
device = backend.get_device_from_array(arr)
self.check_device(device)
assert device == expected_device
@testing.backend.inject_backend_tests(
None,
[
{'use_cuda': True},
{'use_ideep': 'always'},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
])
class TestCpuDeviceFromArrayInvalidArray(unittest.TestCase):
def test_from_array(self, backend_config):
arr = backend_config.get_array(numpy.ndarray((2,), numpy.float32))
device = backend.CpuDevice.from_array(arr)
assert device is None
@testing.parameterize(*testing.product(
{
'value': [None, 1, (), numpy.float32(1)],
}))
class TestCpuDeviceFromArrayInvalidValue(unittest.TestCase):
def test_from_array(self):
device = backend.CpuDevice.from_array(self.value)
assert device is None
@testing.backend.inject_backend_tests( # backend_config2
None,
[
{},
{'use_cuda': True},
{'use_ideep': 'always'},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
])
@testing.backend.inject_backend_tests( # backend_config1
None,
[
{},
])
class TestCpuIsArraySupported(unittest.TestCase):
def test_is_array_supported(self, backend_config1, backend_config2):
target = backend_config1.device # backend.CpuDevice
arr = backend_config2.get_array(numpy.ndarray((2,), numpy.float32))
device = backend_config2.device
if isinstance(device, backend.CpuDevice):
assert target.is_array_supported(arr)
else:
assert not target.is_array_supported(arr)
testing.run_module(__name__, __file__)
| 2,524
| 25.861702
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/backends_tests/test_cuda.py
|
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import warnings
import numpy
import pytest
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
class TestDummyDeviceType(unittest.TestCase):
def test_int(self):
assert int(cuda.DummyDeviceType()) == -1
def test_eq(self):
assert cuda.DummyDeviceType() == cuda.DummyDeviceType()
def test_ne(self):
assert cuda.DummyDeviceType() != 1
_builtins_available = False
try:
import builtins
_builtins_available = True
except ImportError:
pass
class TestCudaModuleAliasForBackwardCompatibility(unittest.TestCase):
def _check(self, code):
temp_dir = tempfile.mkdtemp()
try:
script_path = os.path.join(temp_dir, 'script.py')
with open(script_path, 'w') as f:
f.write(code)
proc = subprocess.Popen(
[sys.executable, script_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdoutdata, stderrdata = proc.communicate()
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
ret = proc.returncode
assert ret == 0, (
'Import test failed.\n'
'[code]:\n{}\n'
'[stdout]:{!r}\n'
'[stderr]:{!r}'.format(
code, stdoutdata, stderrdata))
def test_import1(self):
self._check('from chainer import cuda; cuda.get_device_from_id')
def test_import2(self):
self._check('import chainer.cuda; chainer.cuda.get_device_from_id')
def test_import3(self):
self._check('import chainer; chainer.cuda.get_device_from_id')
class TestCuda(unittest.TestCase):
def test_get_dummy_device(self):
assert cuda.get_device_from_id(None) is cuda.DummyDevice
@attr.gpu
def test_get_device_from_id_for_numpy_int(self):
assert cuda.get_device_from_id(numpy.int64(0)) == cuda.Device(0)
def test_get_device_from_array_for_numpy_int(self):
assert cuda.get_device_from_array(numpy.int64(0)) is cuda.DummyDevice
@attr.gpu
def test_get_device_for_empty_array(self):
x = cuda.get_device_from_array(cuda.cupy.array([]).reshape((0, 10)))
# TODO(okuta): Only check `assert x == cuda.Device(0)`
# when cupy/cupy#946 is merged
assert x == cuda.Device(0) or x == cuda.DummyDevice
@attr.gpu
@unittest.skipUnless(
six.PY3, 'Python2.7 has a bug in catch_warnings, so this test is '
'skipped for Python2.7')
def test_get_device_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
cuda.get_device(cuda.cupy.array([1]))
assert len(w) == 1
assert w[0].category is DeprecationWarning
assert ('get_device is deprecated. Please use get_device_from_id'
' or get_device_from_array instead.' in str(w[0].message))
@attr.gpu
def test_get_device_from_id(self):
assert cuda.get_device_from_id(0) == cuda.Device(0)
@attr.gpu
def test_get_device_from_array(self):
arr = cuda.cupy.array([0])
assert cuda.get_device_from_array(arr) == cuda.Device(0)
@attr.gpu
def test_get_device_for_int(self):
with testing.assert_warns(DeprecationWarning):
device = cuda.get_device(0)
assert device == cuda.Device(0)
@attr.gpu
@unittest.skipUnless(_builtins_available,
'builtins module is not available')
def test_get_device_from_id_for_builtin_int(self):
# builtins.int is from future package and it is different
# from builtin int/long on Python 2.
assert cuda.get_device_from_id(builtins.int(0)) == cuda.Device(0)
@attr.gpu
@unittest.skipUnless(_builtins_available,
'builtins module is not available')
def test_get_device_for_builtin_int(self):
# builtins.int is from future package and it is different
# from builtin int/long on Python 2.
with testing.assert_warns(DeprecationWarning):
device = cuda.get_device(builtins.int(0))
assert device == cuda.Device(0)
@attr.gpu
def test_get_device_for_device(self):
device = cuda.get_device_from_id(0)
with testing.assert_warns(DeprecationWarning):
assert cuda.get_device(device) is device
def test_to_gpu_unavailable(self):
x = numpy.array([1])
if not cuda.available:
with self.assertRaises(RuntimeError):
cuda.to_gpu(x)
def test_cupy_is_not_none(self):
assert cuda.cupy is not None
@testing.parameterize(
{'c_contiguous': True},
{'c_contiguous': False},
)
class TestToCPU(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3))
def test_numpy_array(self):
y = cuda.to_cpu(self.x)
assert self.x is y # Do not copy
@attr.gpu
def test_cupy_array(self):
x = cuda.to_gpu(self.x)
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
y = cuda.to_cpu(x)
assert isinstance(y, numpy.ndarray)
numpy.testing.assert_array_equal(self.x, y)
@attr.multi_gpu(2)
def test_cupy_array2(self):
with cuda.Device(0):
x = cuda.to_gpu(self.x)
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
with cuda.Device(1):
y = cuda.to_cpu(x)
assert isinstance(y, numpy.ndarray)
numpy.testing.assert_array_equal(self.x, y)
@attr.gpu
def test_numpy_array_async(self):
y = cuda.to_cpu(self.x, stream=cuda.Stream())
assert isinstance(y, numpy.ndarray)
assert self.x is y # Do not copy
@attr.gpu
def test_cupy_array_async1(self):
x = cuda.to_gpu(self.x)
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
y = cuda.to_cpu(x, stream=cuda.Stream.null)
assert isinstance(y, numpy.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
@attr.multi_gpu(2)
def test_cupy_array_async2(self):
x = cuda.to_gpu(self.x, device=1)
with x.device:
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
y = cuda.to_cpu(x, stream=cuda.Stream.null)
assert isinstance(y, numpy.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
def test_single_none(self):
assert cuda.to_cpu(None) is None
def _check_list_tuple(self, typ):
assert typ in (list, tuple)
a = numpy.random.uniform(-1, 1, (0,))
b = numpy.random.uniform(-1, 1, (2, 3))
c = cuda.cupy.random.uniform(-1, 1, (0,))
d = cuda.cupy.random.uniform(-1, 1, (2, 2))
xs = typ([a, b, c, d, None, a, b, None, c, d])
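        # to_cpu keeps NumPy arrays as-is, converts each CuPy array once
        # (duplicate inputs share the converted array), and passes None through.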
xs_cpu = cuda.to_cpu(xs)
assert isinstance(xs_cpu, typ)
assert len(xs) == len(xs_cpu)
for i in (0, 1, 2, 3, 5, 6, 8, 9):
assert isinstance(xs_cpu[i], numpy.ndarray)
cuda.cupy.testing.assert_array_equal(xs[i], xs_cpu[i])
assert xs_cpu[0] is a
assert xs_cpu[1] is b
assert xs_cpu[2] is xs_cpu[8]
assert xs_cpu[3] is xs_cpu[9]
assert xs_cpu[4] is None
assert xs_cpu[5] is a
assert xs_cpu[6] is b
assert xs_cpu[7] is None
@attr.gpu
def test_list(self):
self._check_list_tuple(list)
@attr.gpu
def test_tuple(self):
self._check_list_tuple(tuple)
def test_variable(self):
x = chainer.Variable(self.x)
with self.assertRaises(TypeError):
cuda.to_cpu(x)
@testing.parameterize(*testing.product({
'dtype': [
numpy.bool_, numpy.uint8, numpy.int8, numpy.uint16,
numpy.int16, numpy.uint32, numpy.int32, numpy.uint64,
numpy.int64, numpy.float16, numpy.float32, numpy.float64,
numpy.complex_],
}))
class TestToCPUScalar(unittest.TestCase):
def test_numpy_scalar(self):
dtype = self.dtype
if dtype is numpy.bool_:
x = dtype(True)
elif issubclass(dtype, numpy.complex_):
x = dtype(3.2 - 2.4j)
elif issubclass(dtype, numpy.integer):
x = dtype(3)
elif issubclass(dtype, numpy.floating):
x = dtype(3.2)
else:
assert False
y = cuda.to_cpu(x)
assert isinstance(y, numpy.ndarray)
assert y.shape == ()
assert y.dtype == dtype
assert y == x
@attr.cudnn
class TestWorkspace(unittest.TestCase):
def setUp(self):
self.space = cuda.get_max_workspace_size()
def tearDown(self):
cuda.set_max_workspace_size(self.space)
def test_size(self):
size = 1024
cuda.set_max_workspace_size(size)
assert size == cuda.get_max_workspace_size()
@testing.parameterize(*(testing.product({
'c_contiguous': [True],
'device_dtype': [int, numpy.uint8, numpy.int8, numpy.uint16,
numpy.int16, numpy.uint32, numpy.int32, numpy.uint64,
numpy.int64]
}) + testing.product({
'c_contiguous': [False],
'device_dtype': [int]
}))
)
class TestToGPU(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3))
if not self.c_contiguous:
self.x = self.x.T
@attr.gpu
def test_numpy_array(self):
y = cuda.to_gpu(self.x)
assert isinstance(y, cuda.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
@attr.gpu
def test_cupy_array1(self):
x = cuda.to_gpu(self.x)
y = cuda.to_gpu(x)
assert isinstance(y, cuda.ndarray)
assert x is y # Do not copy
@attr.multi_gpu(2)
def test_cupy_array2(self):
x = cuda.to_gpu(self.x, device=self.device_dtype(0))
with x.device:
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
y = cuda.to_gpu(x, device=self.device_dtype(1))
assert isinstance(y, cuda.ndarray)
assert int(y.device) == 1
@attr.gpu
def test_numpy_array_async(self):
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(self.x, stream=cuda.Stream.null)
assert isinstance(y, cuda.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
@attr.multi_gpu(2)
def test_numpy_array_async2(self):
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(self.x, device=self.device_dtype(1),
stream=cuda.Stream.null)
assert isinstance(y, cuda.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
assert int(y.device) == 1
@attr.multi_gpu(2)
def test_numpy_array_async3(self):
with cuda.Device(1):
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(self.x, stream=cuda.Stream.null)
assert isinstance(y, cuda.ndarray)
cuda.cupy.testing.assert_array_equal(self.x, y)
assert int(y.device) == 1
@attr.gpu
def test_cupy_array_async1(self):
x = cuda.to_gpu(self.x)
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(x, stream=cuda.Stream())
assert isinstance(y, cuda.ndarray)
assert x is y # Do not copy
cuda.cupy.testing.assert_array_equal(x, y)
@attr.multi_gpu(2)
def test_cupy_array_async2(self):
x = cuda.to_gpu(self.x, device=self.device_dtype(0))
with x.device:
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(x, device=self.device_dtype(1),
stream=cuda.Stream.null)
assert isinstance(y, cuda.ndarray)
assert x is not y # Do copy
cuda.cupy.testing.assert_array_equal(x, y)
@attr.multi_gpu(2)
def test_cupy_array_async3(self):
with cuda.Device(0):
x = cuda.to_gpu(self.x)
if not self.c_contiguous:
x = cuda.cupy.asfortranarray(x)
with cuda.Device(1):
with testing.assert_warns(DeprecationWarning):
y = cuda.to_gpu(x, stream=cuda.Stream.null)
assert isinstance(y, cuda.ndarray)
assert x is not y # Do copy
cuda.cupy.testing.assert_array_equal(x, y)
@attr.gpu
def test_single_none(self):
assert cuda.to_gpu(None) is None
def _check_list_tuple(self, typ):
assert typ in (list, tuple)
a = numpy.random.uniform(-1, 1, (0,))
b = numpy.random.uniform(-1, 1, (2, 3))
c = cuda.cupy.random.uniform(-1, 1, (0,))
d = cuda.cupy.random.uniform(-1, 1, (2, 2))
xs = typ([a, b, c, d, None, a, b, None, c, d])
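        # to_gpu keeps CuPy arrays as-is, copies each NumPy array to the GPU once
        # (duplicate inputs share the copy), and passes None through.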
xs_gpu = cuda.to_gpu(xs)
assert isinstance(xs_gpu, typ)
assert len(xs) == len(xs_gpu)
for i in (0, 1, 2, 3, 5, 6, 8, 9):
assert isinstance(xs_gpu[i], cuda.cupy.ndarray)
cuda.cupy.testing.assert_array_equal(xs[i], xs_gpu[i])
assert xs_gpu[0] is xs_gpu[5]
assert xs_gpu[1] is xs_gpu[6]
assert xs_gpu[2] is c
assert xs_gpu[3] is d
assert xs_gpu[4] is None
assert xs_gpu[7] is None
assert xs_gpu[8] is c
assert xs_gpu[9] is d
@attr.gpu
def test_list(self):
self._check_list_tuple(list)
@attr.gpu
def test_tuple(self):
self._check_list_tuple(tuple)
@attr.gpu
def test_variable_gpu(self):
x = chainer.Variable(self.x)
with self.assertRaises(TypeError):
cuda.to_gpu(x)
@testing.parameterize(*testing.product({
'dtype': [
numpy.bool_, numpy.uint8, numpy.int8, numpy.uint16,
numpy.int16, numpy.uint32, numpy.int32, numpy.uint64,
numpy.int64, numpy.float16, numpy.float32, numpy.float64,
numpy.complex_],
}))
class TestToGPUScalar(unittest.TestCase):
@attr.gpu
def test_numpy_scalar(self):
dtype = self.dtype
if dtype is numpy.bool_:
x = dtype(True)
elif issubclass(dtype, numpy.complex_):
x = dtype(3.2 - 2.4j)
elif issubclass(dtype, numpy.integer):
x = dtype(3)
elif issubclass(dtype, numpy.floating):
x = dtype(3.2)
else:
assert False
y = cuda.to_gpu(x)
assert isinstance(y, cuda.ndarray)
assert y.shape == ()
assert y.dtype == dtype
assert y == x
@testing.backend.inject_backend_tests(
None,
[
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
])
class TestGpuDevice(unittest.TestCase):
def check_device(self, device, backend_config):
device_id = backend_config.cuda_device
assert isinstance(device, backend.GpuDevice)
assert isinstance(device.device, cuda.cupy.cuda.Device)
assert device.device.id == device_id
assert isinstance(hash(device), int) # hashable
assert device.xp is cuda.cupy
assert device.supported_array_types == (cuda.ndarray,)
assert device.name == '@cupy:{}'.format(device_id)
assert str(device) == '@cupy:{}'.format(device_id)
def test_init(self, backend_config):
cuda_device = cuda.cupy.cuda.Device(backend_config.cuda_device)
device = backend.GpuDevice(cuda_device)
self.check_device(device, backend_config)
def test_from_array(self, backend_config):
with cuda.Device(backend_config.cuda_device):
arr = cuda.ndarray((), numpy.float32)
# Test precondition check
assert arr.device.id == backend_config.cuda_device
device = backend.GpuDevice.from_array(arr)
self.check_device(device, backend_config)
assert device == backend.GpuDevice.from_device_id(
backend_config.cuda_device)
def test_get_device_from_array(self, backend_config):
with cuda.Device(backend_config.cuda_device):
arr = cuda.ndarray((), numpy.float32)
# Test precondition check
assert arr.device.id == backend_config.cuda_device
expected_device = backend_config.device
device = backend.GpuDevice.from_array(arr)
self.check_device(device, backend_config)
assert device == expected_device
device = backend.get_device_from_array(arr)
self.check_device(device, backend_config)
assert device == expected_device
@testing.backend.inject_backend_tests(
None,
[
{},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
@attr.gpu
class TestGpuDeviceFromArrayInvalidArray(unittest.TestCase):
def test_from_array(self, backend_config):
arr = backend_config.get_array(numpy.ndarray((2,), numpy.float32))
device = backend.GpuDevice.from_array(arr)
assert device is None
@testing.parameterize(*testing.product(
{
'value': [None, 1, ()],
}))
@attr.gpu
class TestGpuDeviceFromArrayInvalidValue(unittest.TestCase):
def test_from_array(self):
device = backend.GpuDevice.from_array(self.value)
assert device is None
@testing.parameterize(*testing.product(
{
'device_id': [0, 1, 99999, numpy.int32(1)],
}))
@attr.gpu
class TestGpuDeviceFromDeviceId(unittest.TestCase):
def test_from_device_id(self):
device = backend.GpuDevice.from_device_id(self.device_id)
assert isinstance(device, backend.GpuDevice)
device_spec = '@cupy:{}'.format(self.device_id)
assert device == chainer.get_device(device_spec)
assert device.device.id == int(self.device_id)
@testing.parameterize(*testing.product(
{
'device_id': [None, -1, (), 0.0, numpy.float32(0)],
}))
@attr.gpu
class TestGpuDeviceFromDeviceIdInvalid(unittest.TestCase):
def test_from_device_id(self):
with pytest.raises(ValueError):
backend.GpuDevice.from_device_id(self.device_id)
@testing.backend.inject_backend_tests(
None,
[
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
])
class TestGpuDeviceUse(unittest.TestCase):
def test_use(self, backend_config):
device = backend.GpuDevice.from_device_id(backend_config.cuda_device)
with cuda.Device(0):
device.use()
assert device.device == cuda.Device()
@testing.backend.inject_backend_tests( # backend_config2
None,
[
{},
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
@testing.backend.inject_backend_tests( # backend_config1
None,
[
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
])
class TestGpuIsArraySupported(unittest.TestCase):
def test_is_array_supported(self, backend_config1, backend_config2):
target = backend_config1.device # backend.GpuDevice
arr = backend_config2.get_array(numpy.ndarray((2,), numpy.float32))
device = backend_config2.device
if (isinstance(device, backend.GpuDevice)
and device.device == target.device):
assert target.is_array_supported(arr)
else:
assert not target.is_array_supported(arr)
testing.run_module(__name__, __file__)
| 19,875
| 30.8016
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/initializer_tests/test_normal.py
|
import math
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import initializers
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
default_scale = {
initializers.Normal: 0.05,
}
default_coeff = {
initializers.HeNormal: math.sqrt(2),
}
default_fan = {
initializers.LeCunNormal: 'fan_in',
initializers.GlorotNormal: 'fan_avg',
initializers.HeNormal: 'fan_in',
}
@testing.parameterize(*testing.product({
'target,fan_option': [
(initializers.Normal, None),
(initializers.LeCunNormal, None),
(initializers.GlorotNormal, None),
(initializers.HeNormal, 'fan_in'),
(initializers.HeNormal, 'fan_out'),
],
'shape,fans': [
((2, 3), (3, 2)),
((2, 3, 4), (12, 8)),
],
'scale': [None, 7.3],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'rng_class': [None, numpy.random.RandomState],
}))
@testing.backend.inject_backend_tests(
None,
[
{},
{'use_ideep': 'always'},
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestNormal(unittest.TestCase):
def setUp(self):
kwargs = {}
if self.scale is not None:
kwargs['scale'] = self.scale
if self.fan_option is not None:
kwargs['fan_option'] = self.fan_option
if self.rng_class is not None:
kwargs['rng'] = self.rng_class()
self.target_kwargs = kwargs
def check_initializer(self, w):
initializer = self.target(**self.target_kwargs)
initializer(w)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_initializer(self, backend_config):
w = numpy.empty(self.shape, dtype=self.dtype)
w = backend_config.get_array(w)
with chainer.using_device(backend_config.device):
self.check_initializer(w)
def check_shaped_initializer(self, backend_config):
initializer = self.target(dtype=self.dtype, **self.target_kwargs)
xp = backend_config.xp
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(backend.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_shaped_initializer(self, backend_config):
with chainer.using_device(backend_config.device):
self.check_shaped_initializer(backend_config)
def check_initializer_statistics(self, backend_config, n):
from scipy import stats
xp = backend_config.xp
ws = numpy.empty((n,) + self.shape, dtype=self.dtype)
ws = backend_config.get_array(ws)
for i in range(n):
initializer = self.target(**self.target_kwargs)
initializer(xp.squeeze(ws[i:i+1], axis=0))
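        # Expected standard deviation of the drawn weights, reconstructed from
        # the defaults above: base scale (0.05 for plain Normal, 1.0 for the
        # others), times the He coefficient sqrt(2) where it applies, then
        # scaled by sqrt(1/fan) for fan_in/fan_out or sqrt(2/(fan_in+fan_out))
        # for fan_avg (Glorot).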
fan = self.fan_option or default_fan.get(self.target)
expected_std = self.scale or default_scale.get(self.target) or 1.
expected_std *= default_coeff.get(self.target) or 1.
if fan is not None:
if fan == 'fan_in':
expected_std *= math.sqrt(1. / self.fans[0])
elif fan == 'fan_out':
expected_std *= math.sqrt(1. / self.fans[1])
elif fan == 'fan_avg':
expected_std *= math.sqrt(2. / sum(self.fans))
else:
assert False
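        # Each tensor position yields n independent samples; a
        # Kolmogorov-Smirnov test compares them with N(0, expected_std), with
        # alpha Bonferroni-corrected by the number of positions.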
sampless = cuda.to_cpu(ws.reshape(n, -1).T)
alpha = 0.01 / len(sampless)
for samples in sampless:
_, p = stats.kstest(samples, stats.norm(0, expected_std).cdf)
assert p >= alpha
@testing.with_requires('scipy')
@condition.retry(3)
def test_initializer_statistics(self, backend_config):
with chainer.using_device(backend_config.device):
self.check_initializer_statistics(backend_config, 100)
@attr.slow
@testing.with_requires('scipy')
@condition.repeat_with_success_at_least(5, 3)
def test_initializer_statistics_slow(self, backend_config):
with chainer.using_device(backend_config.device):
self.check_initializer_statistics(backend_config, 10000)
testing.run_module(__name__, __file__)
| 4,513
| 31.710145
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/initializer_tests/test_orthogonal.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import initializers
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
'shape,dim_in,dim_out': [
((), 1, 1),
((1,), 1, 1),
((4, 3), 3, 4),
((6, 2, 3), 6, 6),
((3, 4, 5), 20, 3),
],
'scale,dtype': [
(2., numpy.float16),
(None, numpy.float32),
(None, numpy.float64),
(7.3, numpy.float32),
(7.3, numpy.float64),
],
'rng_class': [None, numpy.random.RandomState],
}))
@testing.backend.inject_backend_tests(
None,
[
{},
{'use_ideep': 'always'},
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class OrthogonalBase(unittest.TestCase):
target = initializers.Orthogonal
def setUp(self):
kwargs = {}
if self.scale is not None:
kwargs['scale'] = self.scale
if self.rng_class is not None:
kwargs['rng'] = self.rng_class()
self.target_kwargs = kwargs
self.check_options = {}
if self.dtype == numpy.float16:
self.check_options = {'atol': 5e-3, 'rtol': 5e-2}
def check_initializer(self, w):
initializer = self.target(**self.target_kwargs)
initializer(w)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_initializer(self, backend_config):
w = numpy.empty(self.shape, dtype=self.dtype)
w = backend_config.get_array(w)
with chainer.using_device(backend_config.device):
self.check_initializer(w)
def check_shaped_initializer(self, backend_config):
initializer = self.target(dtype=self.dtype, **self.target_kwargs)
xp = backend_config.xp
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(backend.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_shaped_initializer(self, backend_config):
with chainer.using_device(backend_config.device):
self.check_shaped_initializer(backend_config)
def check_orthogonality(self, w):
initializer = self.target(**self.target_kwargs)
initializer(w)
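        # The initializer should make the rows (or columns, whichever set is
        # not over-complete) mutually orthogonal with norm `scale`, so the
        # Gram matrix computed below should be scale**2 times the identity.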
w = w.astype(numpy.float64).reshape(self.dim_out, self.dim_in)
if self.dim_in >= self.dim_out:
n = self.dim_out
dots = w.dot(w.T)
else:
n = self.dim_in
dots = w.T.dot(w)
expected_scale = self.scale or 1.1
testing.assert_allclose(
dots, numpy.identity(n) * expected_scale**2,
**self.check_options)
def test_orthogonality(self, backend_config):
w = numpy.empty(self.shape, dtype=self.dtype)
w = backend_config.get_array(w)
with chainer.using_device(backend_config.device):
self.check_orthogonality(w)
def check_initializer_statistics(self, backend_config, n):
from scipy import stats
xp = backend_config.xp
ws = numpy.empty((n,) + self.shape, dtype=self.dtype)
ws = backend_config.get_array(ws)
for i in range(n):
initializer = self.target(**self.target_kwargs)
initializer(xp.squeeze(ws[i:i+1], axis=0))
expected_scale = self.scale or 1.1
sampless = cuda.to_cpu(ws.reshape(n, -1).T)
alpha = 0.01 / len(sampless)
larger_dim = max(self.dim_out, self.dim_in)
ab = 0.5 * (larger_dim - 1)
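        # A single coordinate of a uniformly random unit vector in R^d,
        # rescaled to [-scale, scale], should follow a Beta(ab, ab)
        # distribution with ab = (d - 1) / 2; the degenerate d == 1 case
        # (values +-scale) is handled separately below.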
for samples in sampless:
if larger_dim == 1:
numpy.testing.assert_allclose(abs(samples), expected_scale)
_, p = stats.chisquare((numpy.sign(samples) + 1) // 2)
else:
_, p = stats.kstest(
samples,
stats.beta(
ab, ab,
loc=-expected_scale,
scale=2*expected_scale
).cdf
)
assert p >= alpha
@testing.with_requires('scipy')
@condition.retry(3)
def test_initializer_statistics(self, backend_config):
with chainer.using_device(backend_config.device):
self.check_initializer_statistics(backend_config, 100)
@attr.slow
@testing.with_requires('scipy')
@condition.repeat_with_success_at_least(5, 3)
def test_initializer_statistics_slow(self, backend_config):
with chainer.using_device(backend_config.device):
self.check_initializer_statistics(backend_config, 10000)
class TestEmpty(unittest.TestCase):
def setUp(self):
self.w = numpy.empty(0, dtype=numpy.float32)
self.initializer = initializers.Orthogonal()
def check_assert(self, w):
with self.assertRaises(ValueError):
self.initializer(w)
def test_cpu(self):
self.check_assert(self.w)
@attr.gpu
def test_gpu(self):
self.check_assert(cuda.to_gpu(self.w))
@testing.parameterize(*(testing.product({
'shape': [(3,), (4, 3), (21, 4, 5)],
'mode': ['projection', 'basis'],
}) + testing.product({
'shape': [(0,), (3, 4, 5)],
'mode': ['embedding', 'basis'],
})))
class TestOrthogonalMode(unittest.TestCase):
def setUp(self):
self.w = numpy.empty(self.shape, dtype=numpy.float32)
self.initializer = initializers.Orthogonal(scale=1.0, mode=self.mode)
def check_invalid(self, w):
with self.assertRaises(ValueError):
self.initializer(w)
def test_invalid_cpu(self):
self.check_invalid(self.w)
@attr.gpu
def test_invalid_gpu(self):
self.check_invalid(cuda.to_gpu(self.w))
testing.run_module(__name__, __file__)
| 6,108
| 30.328205
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/initializer_tests/test_sampling.py
|
import unittest
import numpy
from chainer.backends import cuda
from chainer import initializers
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'target': [
initializers.UpsamplingDeconvFilter,
initializers.DownsamplingConvFilter,
],
'interpolation': ['linear'],
'shape': [(5, 5, 3, 3), (5, 5, 6, 6), (5, 1, 3, 3), (5, 1, 4, 4)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestUpsamplingDeconvFilter(unittest.TestCase):
def check_initializer(self, w):
initializer = self.target(
interpolation=self.interpolation, dtype=self.dtype)
initializer(w)
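        # The totals below assume the filter is non-zero only for matching
        # in/out channel pairs: each output channel of the linear upsampling
        # kernel is expected to sum to prod((k + 1) // 2) over the spatial
        # kernel size, and each output channel of the downsampling
        # (averaging) filter to 1.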
if self.target == initializers.UpsamplingDeconvFilter:
w_sum = ((numpy.array(self.shape[2:]) + 1) // 2).prod()
w_sum = w_sum * self.shape[0]
self.assertAlmostEqual(cuda.to_cpu(w).sum(), w_sum)
else:
self.assertAlmostEqual(cuda.to_cpu(w).sum(), 1.0 * self.shape[0])
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_initializer_cpu(self):
w = numpy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
@attr.gpu
def test_initializer_gpu(self):
w = cuda.cupy.empty(self.shape, dtype=self.dtype)
self.check_initializer(w)
def check_shaped_initializer(self, xp):
initializer = self.target(dtype=self.dtype)
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(cuda.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_shaped_initializer_cpu(self):
self.check_shaped_initializer(numpy)
@attr.gpu
def test_shaped_initializer_gpu(self):
self.check_shaped_initializer(cuda.cupy)
testing.run_module(__name__, __file__)
| 1,941
| 31.366667
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/initializer_tests/test_constant.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer import initializers
from chainer import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.backend.inject_backend_tests(
None,
[
{},
{'use_ideep': 'always'},
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestIdentity(unittest.TestCase):
scale = 0.1
shape = (2, 2)
def setUp(self):
self.check_options = {}
if self.dtype == numpy.float16:
self.check_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_initializer(self, w):
initializer = initializers.Identity(scale=self.scale)
initializer(w)
testing.assert_allclose(
w, self.scale * numpy.identity(len(self.shape)),
**self.check_options)
def test_initializer(self, backend_config):
w = numpy.empty(self.shape, dtype=self.dtype)
w = backend_config.get_array(w)
with chainer.using_device(backend_config.device):
self.check_initializer(w)
def check_shaped_initializer(self, backend_config):
initializer = initializers.Identity(
scale=self.scale, dtype=self.dtype)
xp = backend_config.xp
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(backend.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
testing.assert_allclose(
w, self.scale * numpy.identity(len(self.shape)),
**self.check_options)
def test_shaped_initializer(self, backend_config):
with chainer.using_device(backend_config.device):
self.check_shaped_initializer(backend_config)
@testing.parameterize(
{'shape': (2, 3)},
{'shape': (2, 2, 4)},
{'shape': ()},
{'shape': 0})
class TestIdentityInvalid(unittest.TestCase):
def setUp(self):
self.initializer = initializers.Identity()
def test_invalid_shape(self):
w = numpy.empty(self.shape, dtype=numpy.float32)
with self.assertRaises(ValueError):
self.initializer(w)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.backend.inject_backend_tests(
None,
[
{},
{'use_ideep': 'always'},
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestConstant(unittest.TestCase):
fill_value = 0.1
shape = (2, 3)
def setUp(self):
self.check_options = {}
if self.dtype == numpy.float16:
self.check_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_initializer(self, w):
initializer = initializers.Constant(fill_value=self.fill_value)
initializer(w)
testing.assert_allclose(
w, numpy.full(self.shape, self.fill_value),
**self.check_options)
def test_initializer(self, backend_config):
w = numpy.empty(self.shape, dtype=self.dtype)
w = backend_config.get_array(w)
with chainer.using_device(backend_config.device):
self.check_initializer(w)
def check_shaped_initializer(self, backend_config):
initializer = initializers.Constant(
fill_value=self.fill_value, dtype=self.dtype)
xp = backend_config.xp
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(backend.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
testing.assert_allclose(
w, numpy.full(self.shape, self.fill_value),
**self.check_options)
def test_shaped_initializer(self, backend_config):
with chainer.using_device(backend_config.device):
self.check_shaped_initializer(backend_config)
testing.run_module(__name__, __file__)
| 4,417
| 31.014493
| 71
|
py
|
chainer
|
chainer-master/tests/chainer_tests/initializer_tests/test_init.py
|
import os
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import initializers
from chainer import testing
from chainer.testing import attr
import chainerx
class TestGenerateArray(unittest.TestCase):
def _generate_array(self, xp, dtype=None, device=None):
initializer = initializers.Zero(dtype)
return initializers.generate_array(initializer, (), xp, device=device)
def test_default_init(self):
default_dtype = os.environ.get('CHAINER_DTYPE', 'float32')
array = self._generate_array(numpy)
self.assertEqual(default_dtype, array.dtype)
def test_custom_init(self):
with chainer.using_config('dtype', 'float16'):
array = self._generate_array(numpy)
self.assertEqual('float16', array.dtype)
def test_init_with_initializer_dtype(self):
with chainer.using_config('dtype', 'float16'):
array = self._generate_array(numpy, 'float64')
self.assertEqual('float64', array.dtype)
@attr.gpu
def test_init_gpu(self):
array = self._generate_array(cuda.cupy, 'float64')
assert array.device == cuda.Device()
@attr.multi_gpu(2)
def test_init_gpu_with_device(self):
device = cuda.Device(1)
array = self._generate_array(cuda.cupy, 'float64', device)
assert array.device == device
@attr.multi_gpu(2)
def test_init_gpu_with_current_device(self):
device_id = 1
with cuda.get_device_from_id(device_id):
array = self._generate_array(cuda.cupy, 'float64')
assert array.device.id == device_id
@attr.chainerx
def test_init_chainerx_with_device(self):
device = chainerx.get_device('native:1')
array = self._generate_array(chainerx, 'float64', device)
assert array.device is device
@attr.chainerx
def test_init_chainerx_with_device_string(self):
device = 'native:1'
array = self._generate_array(chainerx, 'float64', device)
assert array.device.name == device
@attr.chainerx
def test_init_chainerx_with_default_device(self):
device = chainerx.get_device('native:1')
with chainerx.using_device(device):
array = self._generate_array(chainerx, 'float64')
assert array.device is device
@attr.chainerx
@attr.gpu
def test_init_chainerx_with_cuda(self):
device = chainerx.get_device('cuda:0')
array = self._generate_array(chainerx, 'float64', device)
assert array.device is device
class TestGetInitializer(unittest.TestCase):
def test_scalar(self):
init = initializers._get_initializer(10)
self.assertIsInstance(init, initializers.Constant)
x = numpy.empty((2, 3), dtype=numpy.int32)
init(x)
expected = numpy.full((2, 3), 10, dtype=numpy.int32)
numpy.testing.assert_array_equal(x, expected)
def test_numpy_array(self):
c = numpy.array([1, 2, 3])
init = initializers._get_initializer(c)
self.assertIsInstance(init, initializers.Constant)
x = numpy.empty((3,), dtype=numpy.int32)
init(x)
expected = numpy.array([1, 2, 3], dtype=numpy.int32)
numpy.testing.assert_array_equal(x, expected)
def test_callable(self):
def initializer(arr):
arr[...] = 100
init = initializers._get_initializer(initializer)
self.assertTrue(callable(init))
x = numpy.empty((2, 3), dtype=numpy.int32)
init(x)
expected = numpy.full((2, 3), 100, dtype=numpy.int32)
numpy.testing.assert_array_equal(x, expected)
testing.run_module(__name__, __file__)
| 3,692
| 29.775
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/initializer_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/initializer_tests/test_uniform.py
|
import math
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import initializers
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
default_scale = {
initializers.Uniform: 0.05,
}
default_coeff = {
initializers.LeCunUniform: math.sqrt(3),
initializers.GlorotUniform: math.sqrt(3),
initializers.HeUniform: math.sqrt(6),
}
default_fan = {
initializers.LeCunUniform: 'fan_in',
initializers.GlorotUniform: 'fan_avg',
initializers.HeUniform: 'fan_in',
}
@testing.parameterize(*testing.product({
'target,fan_option': [
(initializers.Uniform, None),
(initializers.LeCunUniform, None),
(initializers.GlorotUniform, None),
(initializers.HeUniform, None),
],
'shape,fans': [
((2, 3), (3, 2)),
((2, 3, 4), (12, 8)),
],
'scale': [None, 7.3],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'rng_class': [None, numpy.random.RandomState],
}))
@testing.backend.inject_backend_tests(
None,
[
{},
{'use_ideep': 'always'},
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestUniform(unittest.TestCase):
def setUp(self):
kwargs = {}
if self.scale is not None:
kwargs['scale'] = self.scale
if self.fan_option is not None:
kwargs['fan_option'] = self.fan_option
if self.rng_class is not None:
kwargs['rng'] = self.rng_class()
self.target_kwargs = kwargs
def check_initializer(self, w):
initializer = self.target(**self.target_kwargs)
initializer(w)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_initializer(self, backend_config):
w = numpy.empty(self.shape, dtype=self.dtype)
w = backend_config.get_array(w)
with chainer.using_device(backend_config.device):
self.check_initializer(w)
def check_shaped_initializer(self, backend_config):
initializer = self.target(dtype=self.dtype, **self.target_kwargs)
xp = backend_config.xp
w = initializers.generate_array(initializer, self.shape, xp)
self.assertIs(backend.get_array_module(w), xp)
self.assertTupleEqual(w.shape, self.shape)
self.assertEqual(w.dtype, self.dtype)
def test_shaped_initializer(self, backend_config):
with chainer.using_device(backend_config.device):
self.check_shaped_initializer(backend_config)
def check_initializer_statistics(self, backend_config, n):
from scipy import stats
xp = backend_config.xp
ws = numpy.empty((n,) + self.shape, dtype=self.dtype)
ws = backend_config.get_array(ws)
for i in range(n):
initializer = self.target(**self.target_kwargs)
initializer(xp.squeeze(ws[i:i+1], axis=0))
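        # Expected half-width of the uniform samples, reconstructed from the
        # defaults above: base scale (0.05 for plain Uniform, 1.0 for the
        # others), times sqrt(3) (LeCun/Glorot) or sqrt(6) (He), then scaled
        # by the same fan factors as in the normal case; samples should be
        # Uniform(-expected_max, expected_max).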
fan = self.fan_option or default_fan.get(self.target)
expected_max = self.scale or default_scale.get(self.target) or 1.
expected_max *= default_coeff.get(self.target) or 1.
if fan is not None:
if fan == 'fan_in':
expected_max *= math.sqrt(1. / self.fans[0])
elif fan == 'fan_out':
expected_max *= math.sqrt(1. / self.fans[1])
elif fan == 'fan_avg':
expected_max *= math.sqrt(2. / sum(self.fans))
else:
assert False
sampless = cuda.to_cpu(ws.reshape(n, -1).T)
alpha = 0.01 / len(sampless)
for samples in sampless:
_, p = stats.kstest(
samples,
stats.uniform(-expected_max, 2*expected_max).cdf
)
assert p >= alpha
@testing.with_requires('scipy')
@condition.retry(3)
def test_initializer_statistics(self, backend_config):
with chainer.using_device(backend_config.device):
self.check_initializer_statistics(backend_config, 100)
@attr.slow
@testing.with_requires('scipy')
@condition.repeat_with_success_at_least(5, 3)
def test_initializer_statistics_slow(self, backend_config):
with chainer.using_device(backend_config.device):
self.check_initializer_statistics(backend_config, 10000)
testing.run_module(__name__, __file__)
| 4,629
| 31.605634
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/optimizer_hooks_tests/test_weight_decay.py
|
import unittest
import numpy as np
import chainer
import chainer.functions as F
from chainer import optimizer_hooks
from chainer import optimizers
from chainer import testing
import utils
_backend_params = [
# NumPy
{},
{'use_ideep': 'always'},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestWeightDecay(unittest.TestCase):
def setUp(self):
self.target = utils.ParametersLink.from_param_props(
((2, 3), (2, 0, 1), ()))
def check_weight_decay(self, backend_configs):
target = self.target
assert len(backend_configs) == len(list(target.params()))
devices = [bc.device for bc in backend_configs]
decay = 0.2
# Compute expected
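        # With SGD(lr=1), one update under the WeightDecay hook should give
        # param -= grad + decay * param, i.e. plain gradient descent plus an
        # L2 penalty term.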
expects = []
for param, device in zip(target.params(), devices):
expects.append(param.array - param.grad - decay * param.array)
param.to_device(device)
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
opt.add_hook(optimizer_hooks.WeightDecay(decay))
opt.update()
# Validate
for expect, param in zip(expects, target.params()):
testing.assert_allclose(expect, param.array)
def test_weight_decay(self, backend_config0,
backend_config1, backend_config2):
self.check_weight_decay(
[backend_config0, backend_config1, backend_config2])
# TODO(kshitij12345): Test with ChainerX once `loss_scale` is supported in `backward()`.
@testing.inject_backend_tests(
None,
# CPU tests
[{}, {'use_ideep': 'always'}]
# GPU tests
+ testing.product({
'use_cuda': [True],
})
+ [
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'}
]
)
class TestWeightDecayLossScale(unittest.TestCase):
def test_weight_decay_loss_scale(self, backend_config):
a = self._updated_array(backend_config, None)
b = self._updated_array(backend_config, loss_scale=4.)
testing.assert_allclose(a, b)
def _updated_array(self, backend_config, loss_scale):
arr = np.arange(3, dtype=np.float32)
param = chainer.Parameter(arr)
link = chainer.Link()
with link.init_scope():
link.p = param
link.to_device(backend_config.device)
opt = optimizers.SGD(lr=1)
opt.setup(link)
opt.add_hook(optimizer_hooks.WeightDecay(1/8.))
loss = F.sum(link.p ** 3)
loss.backward(loss_scale=loss_scale)
opt.update()
return link.p.array
testing.run_module(__name__, __file__)
| 3,058
| 28.413462
| 74
|
py
|
chainer
|
chainer-master/tests/chainer_tests/optimizer_hooks_tests/test_gradient_lars.py
|
import unittest
import numpy as np
import chainer
from chainer import optimizer_hooks
from chainer import optimizers
from chainer import testing
import utils
_backend_params = [
# NumPy
{},
{'use_ideep': 'always'},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestGradientLARS(unittest.TestCase):
def setUp(self):
link1 = utils.ParametersLink.from_param_props(
((2, 3), (2, 0, 1), (0,)))
link2 = utils.ParametersLink.from_param_props(
((5, 0, 1), (0,), (7, 3)))
for param in link2.params():
param.array[...] *= 0.0001
self.target = chainer.ChainList(link1, link2)
def check_LARS(self, backend_configs):
target = self.target
devices = [bc.device for bc in backend_configs]
assert len(backend_configs) == len(list(target[0].params()))
assert len(backend_configs) == len(list(target[1].params()))
threshold = 1e-2
weight_decay = 0.2
eps = 1e-9
expects0 = []
expects1 = []
# Compute expected
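        # LARS rescales each parameter's update by a trust ratio
        # ||p|| / (eps + ||g|| + weight_decay * ||p||); parameters whose
        # norms fall below `threshold` (the second link, scaled down to
        # ~1e-4) are expected to keep the plain rate of 1.0 instead.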
for param, device in zip(target[0].params(), devices):
p0_norm = np.linalg.norm(param.array)
g0_norm = np.linalg.norm(param.grad)
clip_rate = p0_norm / (eps + g0_norm + weight_decay * p0_norm)
expects0.append(param.array - clip_rate
* (param.grad + weight_decay * param.array))
param.to_device(device)
for param, device in zip(target[1].params(), devices):
expects1.append(param.array - 1.0
* (param.grad + weight_decay * param.array))
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
opt.add_hook(optimizer_hooks.GradientLARS(threshold=threshold,
weight_decay=weight_decay,
eps=eps))
opt.update()
for expect, param in zip(expects0, target[0].params()):
testing.assert_allclose(expect, param.array)
for expect, param in zip(expects1, target[1].params()):
testing.assert_allclose(expect, param.array)
def test_LARS(self, backend_config0,
backend_config1, backend_config2):
self.check_LARS(
[backend_config0, backend_config1, backend_config2])
testing.run_module(__name__, __file__)
| 2,840
| 33.228916
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/optimizer_hooks_tests/test_gradient_noise.py
|
import itertools
import unittest
import mock
import numpy as np
from chainer import optimizer_hooks
from chainer import optimizers
from chainer import testing
import utils
_backend_params = [
# NumPy
{},
{'use_ideep': 'always'},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestGradientNoise(unittest.TestCase):
eta = 0.01
def setUp(self):
self.target = utils.ParametersLink.from_param_props(
# TODO(niboshi): Use different shapes
((2, 3), (2, 3), (2, 3)))
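        # Annealed Gaussian gradient noise (cf. Neelakantan et al.): the
        # standard deviation follows roughly sqrt(eta / step ** 0.55); at the
        # first step the denominator is 1, so the mock value is drawn with
        # std sqrt(eta).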
self.noise_value = np.random.normal(
loc=0, scale=np.sqrt(self.eta / np.power(1, 0.55)),
size=(2, 3)).astype(np.float32)
def check_gradient_noise(self, backend_configs):
target = self.target
assert len(backend_configs) == len(list(target.params()))
devices = [bc.device for bc in backend_configs]
noise_value = np.asarray(self.noise_value)
expects = []
# Compute expected
for param, device in zip(target.params(), devices):
expects.append(param.array - param.grad - noise_value)
param.to_device(device)
def test_noise(xp, shape, dtype, hook, opt):
            # Make the noise value an array of the current backend
return xp.array(noise_value)
noise = mock.Mock(side_effect=test_noise)
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
hook = optimizer_hooks.GradientNoise(self.eta, noise_func=noise)
opt.add_hook(hook)
opt.update()
# Validate
for expect, param in zip(expects, target.params()):
testing.assert_allclose(expect, param.array)
self.assertEqual(noise.call_count, len(tuple(self.target.params())))
calls = []
for param in target.params():
xp = param.device.xp
calls.append(mock.call(xp, (2, 3), np.dtype('float32'), hook,
param.update_rule))
# Order does not matter
assert(any([noise.mock_calls == list(permuted_calls)
for permuted_calls in itertools.permutations(calls)]))
def test_gradient_noise(self, backend_config0,
backend_config1, backend_config2):
self.check_gradient_noise(
[backend_config0, backend_config1, backend_config2])
testing.run_module(__name__, __file__)
| 2,820
| 30.696629
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/optimizer_hooks_tests/utils.py
|
import numpy
import chainer
class ParametersLink(chainer.Link):
'''Link with specific parameters.'''
def __init__(self, params):
super(ParametersLink, self).__init__()
with self.init_scope():
for i, p in enumerate(params):
setattr(self, 'p{}'.format(i), p)
@staticmethod
def from_param_props(shapes, dtypes=numpy.float32):
        # Creates a ParametersLink from the given parameter properties.
assert isinstance(shapes, (tuple, list))
assert all(isinstance(s, tuple) for s in shapes)
n_params = len(shapes)
if not isinstance(dtypes, (tuple, list)):
dtypes = (dtypes,) * n_params
arrs = [
numpy.random.uniform(-3, 3, shape).astype(dtype)
for shape, dtype in zip(shapes, dtypes)]
grads = [
numpy.random.uniform(-3, 3, shape).astype(dtype)
for shape, dtype in zip(shapes, dtypes)]
params = []
for arr, grad in zip(arrs, grads):
param = chainer.Parameter(arr)
param.grad = grad
params.append(param)
return ParametersLink(params)
| 1,163
| 28.1
| 70
|
py
|
chainer
|
chainer-master/tests/chainer_tests/optimizer_hooks_tests/test_gradient_hard_clipping.py
|
import unittest
import numpy as np
from chainer import optimizer_hooks
from chainer import optimizers
from chainer import testing
import utils
_backend_params = [
# NumPy
{},
{'use_ideep': 'always'},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestGradientHardClipping(unittest.TestCase):
def setUp(self):
self.target = utils.ParametersLink.from_param_props(
((2, 3), (2, 0, 1), ()))
def check_hardclipping(self, backend_configs):
target = self.target
assert len(backend_configs) == len(list(target.params()))
devices = [bc.device for bc in backend_configs]
lower_bound = -0.9
upper_bound = 1.1
expects = []
# Compute expected
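        # Hard clipping bounds every gradient element to
        # [lower_bound, upper_bound] before the SGD(lr=1) update.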
for param, device in zip(target.params(), devices):
expects.append(param.array - np.clip(param.grad,
lower_bound, upper_bound))
param.to_device(device)
# Apply optimizer_hook
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
opt.add_hook(
optimizer_hooks.GradientHardClipping(lower_bound, upper_bound))
opt.update()
# Validate
for expect, param in zip(expects, target.params()):
testing.assert_allclose(expect, param.array)
def test_hardclipping(self, backend_config0,
backend_config1, backend_config2):
self.check_hardclipping(
[backend_config0, backend_config1, backend_config2])
testing.run_module(__name__, __file__)
| 2,000
| 28.865672
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/optimizer_hooks_tests/test_gradient_clipping.py
|
import math
import unittest
import numpy as np
import chainer
from chainer import optimizer_hooks
from chainer import optimizers
from chainer import testing
import utils
_backend_params = [
# NumPy
{},
{'use_ideep': 'always'},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
class SimpleLink(chainer.Link):
def __init__(self, params):
super(SimpleLink, self).__init__()
with self.init_scope():
for i, p in enumerate(params):
setattr(self, 'p{}'.format(i), p)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestGradientClipping(unittest.TestCase):
def setUp(self):
self.target = utils.ParametersLink.from_param_props(
((2, 3), (2, 0, 1), ()))
self.norm = math.sqrt(sum([
np.square(param.grad).sum() for param in self.target.params()]))
def check_clipping(self, backend_configs, rate):
target = self.target
norm = self.norm
assert len(backend_configs) == len(list(target.params()))
devices = [bc.device for bc in backend_configs]
threshold = norm * rate
expects = []
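        # GradientClipping rescales the whole gradient by threshold / norm
        # when the global L2 norm exceeds the threshold; with
        # threshold = norm * rate this reduces to a factor of min(1, rate).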
for param, device in zip(target.params(), devices):
expects.append(param.array - param.grad * min(1, rate))
param.to_device(device)
opt = optimizers.SGD(lr=1)
opt.setup(target)
opt.add_hook(
optimizer_hooks.GradientClipping(threshold))
opt.update()
for expect, param in zip(expects, target.params()):
testing.assert_allclose(expect, param.array)
def test_clipping(
self, backend_config0, backend_config1, backend_config2):
self.check_clipping(
[backend_config0, backend_config1, backend_config2],
0.5)
def test_clipping_2(
self, backend_config0, backend_config1, backend_config2):
self.check_clipping(
[backend_config0, backend_config1, backend_config2],
2.0)
testing.run_module(__name__, __file__)
| 2,411
| 27.714286
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/optimizer_hooks_tests/test_lasso.py
|
import unittest
import numpy as np
from chainer import optimizer_hooks
from chainer import optimizers
from chainer import testing
import utils
_backend_params = [
# NumPy
{},
{'use_ideep': 'always'},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
@testing.backend.inject_backend_tests(None, _backend_params)
class TestLasso(unittest.TestCase):
def setUp(self):
self.target = utils.ParametersLink.from_param_props(
((2, 3), (2, 0, 1), ()))
def check_lasso(self, backend_configs):
target = self.target
assert len(backend_configs) == len(list(target.params()))
devices = [bc.device for bc in backend_configs]
decay = 0.2
expects = []
# Compute expected
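        # Lasso adds an L1 penalty, so the expected SGD(lr=1) update is
        # param -= grad + decay * sign(param).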
for param, device in zip(target.params(), devices):
expects.append(param.array - param.grad -
decay * np.sign(param.array))
param.to_device(device)
# Compute using optimizer_hook
opt = optimizers.SGD(lr=1)
opt.setup(target)
opt.add_hook(optimizer_hooks.Lasso(decay))
opt.update()
# Validate
for expect, param in zip(expects, target.params()):
testing.assert_allclose(expect, param.array)
def test_lasso(self, backend_config0,
backend_config1, backend_config2):
self.check_lasso([backend_config0, backend_config1, backend_config2])
testing.run_module(__name__, __file__)
| 1,841
| 27.78125
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/utils_tests/test_type_check.py
|
import pickle
import sys
import unittest
import warnings
import numpy
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check as T
class TestConstant(unittest.TestCase):
def setUp(self):
self.x = T.Constant(10)
def test_str(self):
self.assertEqual('10', str(self.x))
def test_eval(self):
self.assertEqual(10, self.x.eval())
class TestVariable(unittest.TestCase):
def setUp(self):
self.x = T.Variable(10, 'x')
def test_str(self):
self.assertEqual('x', str(self.x))
def test_eval(self):
self.assertEqual(10, self.x.eval())
class Object(object):
def __init__(self):
self.value = 10
class TestGetAttr(unittest.TestCase):
def setUp(self):
x = Object()
self.value = T.GetAttr(T.Variable(x, 'x'), 'value')
self.value2 = T.GetAttr(T.Variable(x, 'x'), T.Constant('value'))
self.value3 = T.GetAttr(T.Variable(x, 'x'), 3)
def test_str(self):
self.assertEqual('x.value', str(self.value))
self.assertEqual('x.value', str(self.value2))
self.assertEqual('getattr(x, 3)', str(self.value3))
def test_eval(self):
self.assertEqual(10, self.value.eval())
class TestGetItem(unittest.TestCase):
def setUp(self):
x = T.Variable([1, 2, 3], 'x')
y = T.Variable({'a': 1, 'b': 2}, 'y')
self.x = x
self.v1 = T.GetItem(x, 1)
self.v2 = T.GetItem(y, 'a')
def test_str(self):
self.assertEqual('x[1]', str(self.v1))
self.assertEqual('y[\'a\']', str(self.v2))
x = self.x
self.assertEqual('x[:]', str(x[:]))
self.assertEqual('x[:]', str(x[::]))
self.assertEqual('x[1:]', str(x[1:]))
self.assertEqual('x[:2]', str(x[:2]))
self.assertEqual('x[1:2]', str(x[1:2]))
self.assertEqual('x[1::1]', str(x[1::1]))
self.assertEqual('x[:2:1]', str(x[:2:1]))
self.assertEqual('x[1:2:1]', str(x[1:2:1]))
self.assertEqual('x[...]', str(x[...]))
self.assertEqual('x[0, 1]', str(x[0, 1]))
self.assertEqual('x[1:2, ...]', str(x[1:2:, ...]))
def test_eval(self):
self.assertEqual(2, self.v1.eval())
self.assertEqual(1, self.v2.eval())
class TestCall(unittest.TestCase):
def setUp(self):
f = T.Variable(sum, 'sum')
self.c1 = T.Call(f, ([1, 2, 3],))
self.c2 = f([1, 2, 3])
self.c3 = T.Call(f, (['', 1],))
def test_str(self):
self.assertEqual('sum([1, 2, 3])', str(self.c1))
self.assertEqual('sum([1, 2, 3])', str(self.c2))
self.assertEqual('sum([\'\', 1])', str(self.c3))
def test_eval(self):
self.assertEqual(6, self.c1.eval())
self.assertEqual(6, self.c2.eval())
        # an error occurs in `eval`
with self.assertRaises(TypeError):
self.assertEqual(6, self.c3.eval())
class TestBinaryOperator(unittest.TestCase):
def setUp(self):
x = T.Variable(1, 'x')
y = T.Variable(1, 'y')
def f(x, y):
return x, y
self.op1 = T.BinaryOperator(7, x, y, '+', f)
self.op2 = T.BinaryOperator(8, x, y, '+', f)
self.op3 = T.BinaryOperator(9, x, y, '+', f)
self.op4 = T.BinaryOperator(7, x, y, '+', f, True)
self.op5 = T.BinaryOperator(8, x, y, '+', f, True)
self.op6 = T.BinaryOperator(9, x, y, '+', f, True)
def test_str(self):
self.assertEqual('x + y', str(self.op1))
self.assertEqual('x + (y)', str(self.op2))
self.assertEqual('(x) + (y)', str(self.op3))
self.assertEqual('x + y', str(self.op4))
self.assertEqual('(x) + y', str(self.op5))
self.assertEqual('(x) + (y)', str(self.op6))
def test_eval(self):
self.assertEqual((1, 1), self.op1.eval())
class TestUnaryOperator(unittest.TestCase):
def setUp(self):
x = T.Variable(1, 'x')
def f(x):
return x,
self.op1 = T.UnaryOperator(8, x, '-', f)
self.op2 = T.UnaryOperator(9, x, '-', f)
def test_str(self):
self.assertEqual('-x', str(self.op1))
self.assertEqual('-(x)', str(self.op2))
def test_eval(self):
self.assertEqual((1, ), self.op1.eval())
class TestOperators(unittest.TestCase):
def setUp(self):
self.x = T.Variable(1, 'x')
self.y = T.Variable(1, 'y')
def test_str(self):
x = self.x
y = self.y
self.assertEqual('x + y', str(x + y))
self.assertEqual('1 + x', str(1 + x))
self.assertEqual('x - y', str(x - y))
self.assertEqual('1 - x', str(1 - x))
self.assertEqual('x * y', str(x * y))
self.assertEqual('1 * x', str(1 * x))
self.assertEqual('x / y', str(x / y))
self.assertEqual('1 / x', str(1 / x))
self.assertEqual('x // y', str(x // y))
self.assertEqual('1 // x', str(1 // x))
self.assertEqual('x % y', str(x % y))
self.assertEqual('1 % x', str(1 % x))
self.assertEqual('x ** y', str(x ** y))
self.assertEqual('x ** y', str(pow(x, y)))
self.assertEqual('x << y', str(x << y))
self.assertEqual('1 << x', str(1 << x))
self.assertEqual('x >> y', str(x >> y))
self.assertEqual('1 >> x', str(1 >> x))
self.assertEqual('x & y', str(x & y))
self.assertEqual('1 & x', str(1 & x))
self.assertEqual('x ^ y', str(x ^ y))
self.assertEqual('1 ^ x', str(1 ^ x))
self.assertEqual('x | y', str(x | y))
self.assertEqual('1 | x', str(1 | x))
self.assertEqual('-x', str(-x))
self.assertEqual('+x', str(+x))
self.assertEqual('~x', str(~x))
# left-associative
self.assertEqual('x + x - x', str(x + x - x))
self.assertEqual('x + (x - x)', str(x + (x - x)))
self.assertEqual('x << (x << x)', str(x << (x << x)))
# right-associative
self.assertEqual('x ** x ** x', str(x ** x ** x))
self.assertEqual('x ** x ** x', str(x ** (x ** x)))
self.assertEqual('(x ** x) ** x', str((x ** x) ** x))
self.assertEqual('-(x + x)', str(-(x + x)))
# pow has higher priority than unary operators
self.assertEqual('-x ** x', str(-x ** x))
self.assertEqual('(-x) ** x', str((-x) ** x))
def test_priority(self):
x = self.x
y = self.y
self.assertTrue((x << y).priority == (x >> y).priority)
self.assertTrue((x + y).priority == (x - y).priority)
self.assertTrue((x * y).priority ==
(x / y).priority ==
(x // y).priority ==
(x % y).priority)
self.assertTrue((-x).priority == (+x).priority == (~x).priority)
self.assertTrue((x | y).priority <
(x ^ y).priority <
(x & y).priority <
(x << y).priority <
(x + y).priority <
(x * y).priority <
(-x).priority <
(x ** y).priority <
x.priority)
class TestDivOperator(unittest.TestCase):
def setUp(self):
self.x = T.Variable(1, 'x')
self.y = T.Variable(2, 'y')
def test_div(self):
# Behavior of '/' operator for int depends on the version of Python
if sys.version_info < (3, 0, 0):
self.assertEqual(0, (self.x / self.y).eval())
else:
self.assertEqual(0.5, (self.x / self.y).eval())
class TestGetType(unittest.TestCase):
def test_empty(self):
ts = T.get_types((), 'name', False)
self.assertIsInstance(ts, T.TypeInfoTuple)
self.assertEqual(0, len(ts))
self.assertEqual('name', ts.name)
def test_simple(self):
data = (numpy.zeros((1, 2, 3)).astype(numpy.float32),)
ts = T.get_types(data, 'name', False)
self.assertIsInstance(ts, T.TypeInfoTuple)
self.assertEqual(1, len(ts))
self.assertEqual('name', ts.name)
t = ts[0]
self.assertIsInstance(t, T.Expr)
self.assertEqual(1, t.shape[0].eval())
self.assertEqual(2, t.shape[1].eval())
self.assertEqual(3, t.shape[2].eval())
self.assertEqual(3, t.ndim.eval())
self.assertEqual(numpy.float32, t.dtype.eval())
def test_invalid_arg(self):
with self.assertRaises(AssertionError):
T.get_types(1, 'name', False)
class TestBoolBinaryOperator(unittest.TestCase):
def setUp(self):
x = T.Variable(1, 'x')
y = T.Variable(1, 'y')
z = T.Variable(2, 'z')
def f(x, y):
return x == y
self.op1 = T.BoolBinaryOperator(x, y, '==', '!=', f)
self.op2 = T.BoolBinaryOperator(x, z, '==', '!=', f)
def test_eval(self):
self.assertTrue(self.op1.eval())
def test_expect(self):
with self.assertRaises(T.InvalidType):
self.op2.expect()
def test_bool(self):
with self.assertRaises(RuntimeError):
bool(self.op1)
def test_bool_operator(self):
with self.assertRaises(RuntimeError):
not self.op1
class TestLazyGetItem(unittest.TestCase):
def setUp(self):
self.t = T.Constant(0)
def test_evaluate_size(self):
        # __getitem__, __getattr__ and __call__ only build syntax trees;
        # they are not evaluated yet
self.assertIsInstance(self.t[1], T.Expr)
self.assertIsInstance(self.t.x, T.Expr)
self.assertIsInstance(self.t(), T.Expr)
        # an error is raised at evaluation time
with self.assertRaises(TypeError):
self.t[1].eval()
with self.assertRaises(AttributeError):
self.t.x.eval()
with self.assertRaises(TypeError):
self.t().eval()
class TestListItem(unittest.TestCase):
def test_eval_list_items(self):
self.assertTrue((T.Constant([0]) == [T.Constant(0)]).eval())
def test_list_str(self):
self.assertEqual('[0]', T._repr([T.Constant(0)]))
def test_eval_tuple_items(self):
self.assertTrue((T.Constant((0,)) == (T.Constant(0),)).eval())
def test_tuple_str(self):
self.assertEqual('()', T._repr(()))
self.assertEqual('(0,)', T._repr((T.Constant(0),)))
self.assertEqual('(0, 0)', T._repr((T.Constant(0), T.Constant(0))))
def test_eval_nest_list(self):
self.assertTrue((T.Constant([[0]]) == [[T.Constant(0)]]).eval())
def test_nest_list_str(self):
self.assertEqual('[[0]]', T._repr([[T.Constant(0)]]))
class TestProd(unittest.TestCase):
def test_name(self):
p = T.prod([])
self.assertEqual(str(p), 'prod([])')
def test_value(self):
value = T.prod([2, 3]).eval()
self.assertEqual(value, 6)
class TestSameTypes(unittest.TestCase):
def test_all_numpy_array(self):
x = numpy.array([0])
y = numpy.array([1])
z = numpy.array([2])
self.assertTrue(T.same_types(x, y, z))
def test_all_numpy_subclasses(self):
x = numpy.array([0])
y = numpy.array([[1], [2]])
with warnings.catch_warnings():
warnings.simplefilter('ignore')
z = numpy.matrix('3,4; 5,6')
self.assertTrue(T.same_types(x, y, z))
@attr.gpu
def test_all_cupy_array(self):
x = cuda.cupy.array([0])
y = cuda.cupy.array([1])
z = cuda.cupy.array([2])
self.assertTrue(T.same_types(x, y, z))
@attr.gpu
def test_numpy_cupy_mixed_1(self):
x = numpy.array([0])
y = cuda.cupy.array([1])
z = numpy.array([2])
self.assertFalse(T.same_types(x, y, z))
@attr.gpu
def test_numpy_cupy_mixed_2(self):
x = cuda.cupy.array([0])
y = numpy.array([1])
z = cuda.cupy.array([2])
self.assertFalse(T.same_types(x, y, z))
class TestInvalidType(unittest.TestCase):
def test_pickle(self):
exc = T.InvalidType('foo', 'bar', 'baz')
new = pickle.loads(pickle.dumps(exc))
self.assertEqual(exc.args, new.args)
self.assertEqual(exc.expect, new.expect)
self.assertEqual(exc.actual, new.actual)
testing.run_module(__name__, __file__)
| 12,347
| 29.117073
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/utils_tests/test_cache.py
|
import unittest
import numpy
import chainer
from chainer import testing
from chainer.utils import cache
class MockDistribution(object):
def __init__(self, x):
self.x = x
self.h_call_count = 0
self.y_call_count = 0
@cache.cached_property
def h(self):
self.h_call_count += 1
return self.x * 2
@cache.cached_property
def y(self):
self.y_call_count += 1
return self.h * 3
class TestCachedProperty(unittest.TestCase):
def test_name(self):
assert MockDistribution.y.__name__ == 'y'
def test1(self):
obj = MockDistribution(chainer.Variable(numpy.array([1.])))
h0 = obj.h
h1 = obj.h
assert obj.h_call_count == 1
assert h0 is h1
numpy.testing.assert_allclose(h0.array, 2.)
def test2(self):
obj = MockDistribution(chainer.Variable(numpy.array([1.])))
with chainer.no_backprop_mode():
h0 = obj.h
h1 = obj.h
assert obj.h_call_count == 1
assert h0 is h1
numpy.testing.assert_allclose(h0.array, 2.)
def test3(self):
obj = MockDistribution(chainer.Variable(numpy.array([1.])))
h0 = obj.h
with chainer.no_backprop_mode():
h1 = obj.h
h2 = obj.h
with chainer.no_backprop_mode():
h3 = obj.h
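        # cached_property is expected to keep separate caches for the default
        # mode and no_backprop_mode: h0/h2 share one cached value, h1/h3 the
        # other, so the property body runs at most twice.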
assert obj.h_call_count <= 2
assert h0 is h2
assert h0 is not h1
assert h1 is h3
numpy.testing.assert_allclose(h0.array, 2.)
numpy.testing.assert_allclose(h1.array, 2.)
def test_attrs1(self):
obj = MockDistribution(chainer.Variable(numpy.array([1.])))
h0 = obj.h
y0 = obj.y
h1 = obj.h
y1 = obj.y
assert obj.h_call_count == 1
assert obj.y_call_count == 1
assert h0 is h1
assert y0 is y1
numpy.testing.assert_allclose(h0.array, 2.)
numpy.testing.assert_allclose(y0.array, 6.)
def test_objs1(self):
obj0 = MockDistribution(chainer.Variable(numpy.array([1.])))
obj1 = MockDistribution(chainer.Variable(numpy.array([10.])))
y00 = obj0.y
y10 = obj1.y
y01 = obj0.y
y11 = obj1.y
assert obj0.y_call_count == 1
assert obj1.y_call_count == 1
assert y00 is y01
assert y10 is y11
numpy.testing.assert_allclose(y00.array, 6.)
numpy.testing.assert_allclose(y10.array, 60.)
testing.run_module(__name__, __file__)
| 2,502
| 25.62766
| 69
|
py
|
chainer
|
chainer-master/tests/chainer_tests/utils_tests/test_conv_nd.py
|
import itertools
import unittest
import numpy
from six import moves
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv_nd
class TestAsTuple(unittest.TestCase):
def test_scalar(self):
actual = conv_nd.as_tuple(1, 3)
expected = (1, 1, 1)
self.assertEqual(actual, expected)
def test_tuple(self):
actual = conv_nd.as_tuple((1, 2, 3), 3)
expected = (1, 2, 3)
self.assertEqual(actual, expected)
def test_list(self):
actual = conv_nd.as_tuple([1, 2, 3], 3)
expected = (1, 2, 3)
self.assertEqual(actual, expected)
def test_tuple_invalid_length(self):
with self.assertRaises(AssertionError):
conv_nd.as_tuple((1,), 3)
@testing.parameterize(*testing.product({
'dims': [(10,), (10, 8), (10, 8, 6)],
}))
class TestIm2ColND(unittest.TestCase):
def setUp(self):
shape = (2, 3) + self.dims
self.img = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
def check_im2col_nd(self, ksize, stride, pad, gpu):
dims = self.dims
if gpu:
img = cuda.to_gpu(self.img)
else:
img = self.img
col = conv_nd.im2col_nd(img, ksize, stride, pad)
outs = tuple(conv_nd.get_conv_outsize(d, k, s, p)
for (d, k, s, p) in zip(dims, ksize, stride, pad))
expected_shape = (2, 3) + ksize + outs
self.assertEqual(col.shape, expected_shape)
col = cuda.to_cpu(col)
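        # im2col semantics: col[n, c, dxs..., xs...] should equal
        # img[n, c, oxs...] with ox = x * stride - pad + dx on each spatial
        # axis, and 0 whenever that position falls inside the padding.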
for n in moves.range(2):
for c in moves.range(3):
for xs in itertools.product(
*[moves.range(out) for out in outs]):
for dxs in itertools.product(
*[moves.range(k) for k in ksize]):
oxs = tuple(x * s - p + dx
for (x, s, p, dx)
in zip(xs, stride, pad, dxs))
if all(0 <= ox < d for (ox, d) in zip(oxs, dims)):
col_index = (n, c) + dxs + xs
img_index = (n, c) + oxs
self.assertEqual(
col[col_index], self.img[img_index])
else:
col_index = (n, c) + dxs + xs
self.assertEqual(col[col_index], 0)
def test_im2col_nd_1_cpu(self):
ndim = len(self.dims)
ksize = (1,) * ndim
stride = (1,) * ndim
pad = (1,) * ndim
self.check_im2col_nd(ksize, stride, pad, gpu=False)
def test_im2col_nd_2_cpu(self):
ndim = len(self.dims)
ksize = (2,) * ndim
stride = (2,) * ndim
pad = (2,) * ndim
self.check_im2col_nd(ksize, stride, pad, gpu=False)
def test_im2col_nd_3_cpu(self):
ndim = len(self.dims)
ksize = (1, 2, 1)[:ndim]
stride = (2, 1, 2)[:ndim]
pad = (1, 2, 1)[:ndim]
self.check_im2col_nd(ksize, stride, pad, gpu=False)
@attr.gpu
def test_im2col_nd_1_gpu(self):
ndim = len(self.dims)
ksize = (1,) * ndim
stride = (1,) * ndim
pad = (1,) * ndim
self.check_im2col_nd(ksize, stride, pad, gpu=True)
@attr.gpu
def test_im2col_nd_2_gpu(self):
ndim = len(self.dims)
ksize = (2,) * ndim
stride = (2,) * ndim
pad = (2,) * ndim
self.check_im2col_nd(ksize, stride, pad, gpu=True)
@attr.gpu
def test_im2col_nd_3_gpu(self):
ndim = len(self.dims)
ksize = (1, 2, 1)[:ndim]
stride = (2, 1, 2)[:ndim]
pad = (1, 2, 1)[:ndim]
self.check_im2col_nd(ksize, stride, pad, gpu=True)
class TestIm2ColNDParameterRanks(unittest.TestCase):
def setUp(self):
shape = (2, 3, 4, 3)
self.ksize = (2, 2)
self.stride = (1, 1)
self.pad = (0, 0)
self.img = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
def test_im2col_nd_cpu_parameter_ranks(self):
# Invalid ksize length.
with self.assertRaises(AssertionError):
conv_nd.im2col_nd_cpu(self.img, (2,), self.stride, self.pad)
# Invalid stride length.
with self.assertRaises(AssertionError):
conv_nd.im2col_nd_cpu(self.img, self.ksize, (1,), self.pad)
# Invalid pad length.
with self.assertRaises(AssertionError):
conv_nd.im2col_nd_cpu(self.img, self.ksize, self.stride, (0,))
@attr.gpu
def test_im2col_nd_gpu_parameter_ranks(self):
img_gpu = cuda.to_gpu(self.img)
# Invalid ksize length.
with self.assertRaises(AssertionError):
conv_nd.im2col_nd_gpu(img_gpu, (2,), self.stride, self.pad)
# Invalid stride length.
with self.assertRaises(AssertionError):
conv_nd.im2col_nd_gpu(img_gpu, self.ksize, (1,), self.pad)
# Invalid pad length.
with self.assertRaises(AssertionError):
conv_nd.im2col_nd_gpu(img_gpu, self.ksize, self.stride, (0,))
@testing.parameterize(*testing.product({
'dims': [(10,), (10, 8), (10, 8, 6)],
}))
class TestCol2ImND(unittest.TestCase):
def check_col2im_nd(self, ksize, stride, pad, gpu):
dims = self.dims
outs = tuple(conv_nd.get_conv_outsize(d, k, s, p)
for (d, k, s, p) in zip(dims, ksize, stride, pad))
col_shape = (2, 3) + ksize + outs
col = numpy.random.uniform(-1, 1, col_shape).astype(numpy.float32)
if gpu:
col_data = cuda.to_gpu(col)
else:
col_data = col
img = conv_nd.col2im_nd(col_data, stride, pad, dims)
img = cuda.to_cpu(img)
img_shape = (2, 3) + dims
self.assertEqual(img.shape, img_shape)
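        # col2im is the adjoint of im2col: every image pixel accumulates the
        # column entries of all kernel-offset/output-position pairs that map
        # back onto it, which the nested loops below recompute directly.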
for n in moves.range(2):
for c in moves.range(3):
for xs in itertools.product(
*[moves.range(d) for d in dims]):
v = numpy.float32(0.0)
for dxs in itertools.product(
*[moves.range(k) for k in ksize]):
oxs = tuple((x + p - dx) // s
for (x, p, dx, s)
in zip(xs, pad, dxs, stride))
if all((x + p - dx) % s == 0
for (x, p, dx, s)
in zip(xs, pad, dxs, stride)) and \
all(0 <= ox < out
for (ox, out) in zip(oxs, outs)):
col_index = (n, c) + dxs + oxs
v += col[col_index]
img_index = (n, c) + xs
self.assertAlmostEqual(img[img_index], v)
def test_col2im_1_cpu(self):
ndim = len(self.dims)
ksize = (1,) * ndim
stride = (1,) * ndim
pad = (1,) * ndim
self.check_col2im_nd(ksize, stride, pad, gpu=False)
def test_col2im_2_cpu(self):
ndim = len(self.dims)
ksize = (2,) * ndim
stride = (2,) * ndim
pad = (2,) * ndim
self.check_col2im_nd(ksize, stride, pad, gpu=False)
def test_col2im_3_cpu(self):
ndim = len(self.dims)
ksize = (1, 2, 1)[:ndim]
stride = (2, 1, 2)[:ndim]
pad = (1, 2, 1)[:ndim]
self.check_col2im_nd(ksize, stride, pad, gpu=False)
@attr.gpu
def test_col2im_1_gpu(self):
ndim = len(self.dims)
ksize = (1,) * ndim
stride = (1,) * ndim
pad = (1,) * ndim
self.check_col2im_nd(ksize, stride, pad, gpu=True)
@attr.gpu
def test_col2im_2_gpu(self):
ndim = len(self.dims)
ksize = (2,) * ndim
stride = (2,) * ndim
pad = (2,) * ndim
self.check_col2im_nd(ksize, stride, pad, gpu=True)
@attr.gpu
def test_col2im_3_gpu(self):
ndim = len(self.dims)
ksize = (1, 2, 1)[:ndim]
stride = (2, 1, 2)[:ndim]
pad = (1, 2, 1)[:ndim]
self.check_col2im_nd(ksize, stride, pad, gpu=True)
class TestCol2ImNDParameterRanks(unittest.TestCase):
def setUp(self):
self.dims = (4, 3)
self.ksize = (2, 2)
self.stride = (1, 1)
self.pad = (0, 0)
self.outs = tuple(conv_nd.get_conv_outsize(d, k, s, p)
for (d, k, s, p) in zip(
self.dims, self.ksize, self.stride, self.pad))
col_shape = (2, 3) + self.ksize + self.outs
self.col = numpy.random.uniform(-1, 1, col_shape).astype(numpy.float32)
def test_col2im_nd_cpu_parameter_ranks(self):
# Invalid ksize length.
col_shape = (2, 3) + (2,) + self.outs
col = numpy.random.uniform(-1, 1, col_shape).astype(numpy.float32)
with self.assertRaises(AssertionError):
conv_nd.col2im_nd_cpu(col, self.stride, self.pad, self.dims)
# Invalid stride length.
with self.assertRaises(AssertionError):
conv_nd.col2im_nd_cpu(self.col, (1,), self.pad, self.dims)
# Invalid pad length.
with self.assertRaises(AssertionError):
conv_nd.col2im_nd_cpu(self.col, self.stride, (0,), self.dims)
# Invalid dims length.
with self.assertRaises(AssertionError):
conv_nd.col2im_nd_cpu(self.col, self.stride, self.pad, (4,))
@attr.gpu
def test_col2im_nd_gpu_parameter_ranks(self):
# Invalid ksize length.
col_shape = (2, 3) + (2,) + self.outs
col = numpy.random.uniform(-1, 1, col_shape).astype(numpy.float32)
col_gpu = cuda.to_gpu(col)
with self.assertRaises(AssertionError):
conv_nd.col2im_nd_gpu(col_gpu, self.stride, self.pad, self.dims)
col_gpu = cuda.to_gpu(self.col)
# Invalid stride length.
with self.assertRaises(AssertionError):
conv_nd.col2im_nd_gpu(col_gpu, (1,), self.pad, self.dims)
# Invalid pad length.
with self.assertRaises(AssertionError):
conv_nd.col2im_nd_gpu(col_gpu, self.stride, (0,), self.dims)
# Invalid dims length.
with self.assertRaises(AssertionError):
conv_nd.col2im_nd_gpu(col_gpu, self.stride, self.pad, (4,))
testing.run_module(__name__, __file__)
| 10,391
| 32.960784
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/utils_tests/test_conv_nd_kernel.py
|
import unittest
import mock
import chainer
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv_nd_kernel
@testing.parameterize(*testing.product({
'ndim': [2, 3, 4],
}))
@attr.gpu
class TestIm2colNDKernelMemo(unittest.TestCase):
def setUp(self):
chainer.backends.cuda.clear_memo()
def test_im2col_nd_kernel_memo(self):
ndim = self.ndim
with mock.patch(
'chainer.utils.conv_nd_kernel.Im2colNDKernel._generate') as m:
conv_nd_kernel.Im2colNDKernel.generate(ndim)
m.assert_called_once_with(ndim)
conv_nd_kernel.Im2colNDKernel.generate(ndim)
m.assert_called_once_with(ndim)
@testing.parameterize(*testing.product({
'ndim': [2, 3, 4],
}))
@attr.gpu
class TestCol2imNDKernelMemo(unittest.TestCase):
def setUp(self):
chainer.backends.cuda.clear_memo()
def test_col2im_nd_kernel_memo(self):
ndim = self.ndim
with mock.patch(
'chainer.utils.conv_nd_kernel.Col2imNDKernel._generate') as m:
conv_nd_kernel.Col2imNDKernel.generate(ndim)
m.assert_called_once_with(ndim)
conv_nd_kernel.Col2imNDKernel.generate(ndim)
m.assert_called_once_with(ndim)
testing.run_module(__name__, __file__)
| 1,324
| 25.5
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/utils_tests/test_sparse.py
|
import unittest

import numpy

from chainer import testing
from chainer import utils


def _setup_tensor(_min, _max, shape, dtype, threshold=None):
    y = numpy.random.uniform(_min, _max, shape).astype(dtype)
    if threshold is not None:
        y[y < threshold] = 0
    return y


@testing.parameterize(*testing.product({
    'shape': [(2, 3), (3, 4)],
    'nbatch': [0, 1, 4],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'use_ldnz': [False, True],
}))
class TestCooMatrix(unittest.TestCase):

    def test_to_dense(self):
        if self.nbatch > 0:
            x_shape = (self.nbatch, self.shape[0], self.shape[1])
        else:
            x_shape = self.shape
        x0 = _setup_tensor(.5, 1, x_shape, self.dtype, .75)
        if self.use_ldnz:
            ldnz = self.shape[0] * self.shape[1]
            sp_x = utils.to_coo(x0, ldnz=ldnz)
        else:
            sp_x = utils.to_coo(x0)
        assert sp_x.data.shape == sp_x.row.shape == sp_x.col.shape
        if self.nbatch > 0:
            assert sp_x.data.ndim == 2
            assert sp_x.data.shape[0] == self.nbatch
            if self.use_ldnz:
                assert sp_x.data.shape[1] == ldnz
            else:
                max_nnz = 0
                for i in range(self.nbatch):
                    max_nnz = max(max_nnz, numpy.count_nonzero(x0[i]))
                assert sp_x.data.shape[1] == max_nnz
        else:
            assert sp_x.data.ndim == 1
            if self.use_ldnz:
                assert sp_x.data.shape[0] == ldnz
            else:
                max_nnz = numpy.count_nonzero(x0)
                assert sp_x.data.shape[0] == max_nnz
        x1 = sp_x.to_dense()
        numpy.testing.assert_array_equal(x0, x1)


@testing.parameterize(*testing.product({
    'nbatch': [0, 1],
}))
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
        {'use_ideep': 'always'},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'cuda_device': [0, 1],
    }))
class TestCooMatrixDuplicateIndices(unittest.TestCase):

    def test_to_dense(self, backend_config):
        xp = backend_config.xp
        data = xp.array([3., 0., 4., -5.], dtype=xp.float32)
        row = xp.array([0, 0, 1, 0])
        col = xp.array([0, 1, 2, 0])
        if self.nbatch == 1:
            data = data[xp.newaxis]
            row = row[xp.newaxis]
            col = col[xp.newaxis]
        x0 = xp.array([[-2., 0., 0.], [0., 0., 4.]], dtype=xp.float32)
        if self.nbatch == 1:
            x0 = x0[xp.newaxis]
        x1 = utils.CooMatrix(data, row, col, (2, 3)).to_dense()
        xp.testing.assert_array_equal(x0, x1)


@testing.parameterize(*testing.product({
    'shape': [(2, 3), (3, 4)],
    'nbatch': [0, 1, 4],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestGetOrder(unittest.TestCase):

    def test_get_order(self):
        if self.nbatch > 0:
            x_shape = (self.nbatch, self.shape[0], self.shape[1])
        else:
            x_shape = self.shape
        x0 = _setup_tensor(.5, 1, x_shape, self.dtype, .75)
        x0 = numpy.ascontiguousarray(x0)
        if self.nbatch > 0:
            x0[0, 0, self.shape[1]-1] = 1.
            x0[0, self.shape[0]-1, 0] = 1.
        else:
            x0[0, self.shape[1]-1] = 1.
            x0[self.shape[0]-1, 0] = 1.
        sp_x = utils.to_coo(x0)
        row = sp_x.row
        col = sp_x.col
        assert utils.get_order(row, col) == 'C'
        assert utils.get_order(col, row) == 'F'


class TestGetOrder2(unittest.TestCase):

    def test_other_order(self):
        row = numpy.array((0, 2, 4, 1, 3), dtype=numpy.int32)
        col = numpy.array((1, 3, 0, 2, 4), dtype=numpy.int32)
        assert utils.get_order(row, col) == 'other'
        assert utils.get_order(col, row) == 'other'

    def test_diag_order(self):
        row = numpy.array((0, 0, 1, 1, 2), dtype=numpy.int32)
        col = numpy.array((0, 1, 1, 2, 2), dtype=numpy.int32)
        assert utils.get_order(row, col) == 'C'
        assert utils.get_order(col, row) == 'C'

    def test_invalid_shape(self):
        row = numpy.array((0, 2, 4, 1), dtype=numpy.int32)
        col = numpy.array((1, 3, 0, 2, 4), dtype=numpy.int32)
        with self.assertRaises(ValueError):
            utils.get_order(row, col)

    def test_invalid_index_combination(self):
        row = numpy.array((0, 2, 4, 1, -1), dtype=numpy.int32)
        col = numpy.array((1, 3, 0, -1, 4), dtype=numpy.int32)
        with self.assertRaises(ValueError):
            utils.get_order(row, col)


testing.run_module(__name__, __file__)
| 4,614
| 30.182432
| 70
|
py
|
chainer
|
chainer-master/tests/chainer_tests/utils_tests/test_array.py
|
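# Tests for ``chainer.utils.array``: ``size_of_shape`` returns the number of
# elements implied by a shape tuple, and ``sum_to`` reduces an array to a
# broadcast-compatible target shape by summing over the broadcast axes.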
import unittest

import numpy

from chainer import testing
from chainer.utils import array


@testing.parameterize(
    {'shape': ()},
    {'shape': (2, 3)},
    {'shape': (1,)},
    {'shape': (0,)},
    {'shape': (5, 6, 7)},
    {'shape': (0, 3)},
    {'shape': (2, 0)},
    {'shape': (5, 0, 7)},
)
class TestSizeOfShape(unittest.TestCase):

    def test_size_of_shape(self):
        arr = numpy.empty(self.shape)
        size = array.size_of_shape(arr.shape)
        size_expect = arr.size
        assert type(size) == type(size_expect)
        assert size == size_expect


@testing.parameterize(
    {'in_shape': (), 'out_shape': ()},
    {'in_shape': (3,), 'out_shape': ()},
    {'in_shape': (3,), 'out_shape': (1,)},
    {'in_shape': (3,), 'out_shape': (3,)},
    {'in_shape': (2, 3), 'out_shape': ()},
    {'in_shape': (2, 3), 'out_shape': (3,)},
    {'in_shape': (2, 3), 'out_shape': (1, 3)},
    {'in_shape': (2, 3), 'out_shape': (2, 1)},
    {'in_shape': (2, 3), 'out_shape': (2, 3)},
    {'in_shape': (2, 3, 4), 'out_shape': (3, 1)},
)
class TestSumTo(unittest.TestCase):

    def test_sum_to(self):
        n_elems = numpy.prod(self.in_shape)
        x = numpy.arange(1, n_elems + 1, dtype=numpy.float32).reshape(
            self.in_shape)
        y_actual = array.sum_to(x, self.out_shape)
        y_expect = numpy.zeros(self.out_shape, x.dtype)
        for dst, src in numpy.nditer(
                [y_expect, x], ['reduce_ok'], [['readwrite'], ['readonly']]):
            dst += src
        numpy.testing.assert_array_equal(y_expect, y_actual)


testing.run_module(__name__, __file__)
| 1,601
| 27.105263
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/utils_tests/test_precision.py
|
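# Tests for the fp16 mixed-precision helper: the decorator is expected to
# upcast float16 inputs to float32 before ``forward`` runs and cast the
# outputs back to float16, while leaving float32/float64 and integer inputs
# untouched.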
import unittest

import numpy

from chainer import function_node
from chainer import testing
from chainer.utils import precision


class F(function_node.FunctionNode):

    @precision._fp16_mixed_precision_helper
    def forward(self, x):
        self.x = x
        return x


class G(function_node.FunctionNode):

    @precision._fp16_mixed_precision_helper
    def forward(self, x):
        return None,


class TestMixedPrecision(unittest.TestCase):

    def test_fp16(self):
        x = (numpy.zeros((1, 2, 3), dtype=numpy.float16),
             numpy.zeros((1, 2, 3), dtype=numpy.float16))
        f = F()
        y = f.apply(x)
        assert f.x[0].dtype == numpy.float32
        assert f.x[1].dtype == numpy.float32
        assert y[0].dtype == numpy.float16
        assert y[1].dtype == numpy.float16

    def test_fp32(self):
        x = (numpy.zeros((1, 2, 3), dtype=numpy.float32),
             numpy.zeros((1, 2, 3), dtype=numpy.float32))
        f = F()
        y = f.apply(x)
        assert f.x[0] is x[0]
        assert f.x[1] is x[1]
        assert y[0].dtype == numpy.float32
        assert y[1].dtype == numpy.float32

    def test_fp64(self):
        x = (numpy.zeros((1, 2, 3), dtype=numpy.float64),
             numpy.zeros((1, 2, 3), dtype=numpy.float64))
        f = F()
        y = f.apply(x)
        assert f.x[0] is x[0]
        assert f.x[1] is x[1]
        assert y[0].dtype == numpy.float64
        assert y[1].dtype == numpy.float64

    def test_float16_int8(self):
        x = (numpy.zeros((1, 2, 3), dtype=numpy.float16),
             numpy.zeros((1, 2, 3), dtype=numpy.int8))
        f = F()
        y = f.apply(x)
        assert f.x[0].dtype == numpy.float32
        assert f.x[1] is x[1]
        assert y[0].dtype == numpy.float16
        assert y[1].dtype == numpy.int8

    def test_none(self):
        x = numpy.zeros((1, 2, 3), dtype=numpy.float64),
        y = G().apply(x)
        assert y[0].data is None


testing.run_module(__name__, __file__)
| 1,990
| 25.905405
| 57
|
py
|