| repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
| chainer | chainer-master/tests/chainer_tests/datasets_tests/test_pickle_dataset.py |
import ctypes
import io
import multiprocessing
import os
import pickle
import platform
import sys
import unittest
import mock
from chainer import datasets
from chainer.datasets import pickle_dataset
from chainer import testing
from chainer import utils
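# Wraps an io object and records, in shared multiprocessing values, how many
# times its after_fork() hook was called and the PID of the last caller.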
class ReaderMock(object):
def __init__(self, io_):
self.io = io_
self._lock = multiprocessing.RLock()
self._hook_called = multiprocessing.Value(ctypes.c_int, 0, lock=False)
self._last_caller_pid = multiprocessing.Value(
ctypes.c_int, -1, lock=False)
@property
def n_hook_called(self):
with self._lock:
return self._hook_called.value
@property
def last_caller_pid(self):
with self._lock:
return self._last_caller_pid.value
def __getattr__(self, name):
return getattr(self.io, name)
def after_fork(self):
with self._lock:
self._hook_called.value += 1
self._last_caller_pid.value = os.getpid()
class TestPickleDataset(unittest.TestCase):
def setUp(self):
self.io = io.BytesIO()
def test_write_read(self):
writer = datasets.PickleDatasetWriter(self.io)
writer.write(1)
writer.write('hello')
writer.write(1.5)
writer.flush()
dataset = datasets.PickleDataset(self.io)
assert len(dataset) == 3
assert dataset[0] == 1
assert dataset[2] == 1.5
assert dataset[1] == 'hello'
def test_picklable(self):
writer = datasets.PickleDatasetWriter(self.io)
writer.write(1)
writer.flush()
dataset = datasets.PickleDataset(self.io)
dataset = pickle.loads(pickle.dumps(dataset))
assert len(dataset) == 1
assert dataset[0] == 1
@unittest.skipIf(platform.system() == 'Windows',
'Windows does not support `fork` method')
def test_after_fork(self):
writer = datasets.PickleDatasetWriter(self.io)
writer.write(1)
writer.flush()
reader = ReaderMock(self.io)
        # Assign to a variable to avoid destruction of the instance
        # before the child process is created.
dataset = datasets.PickleDataset(reader)
assert reader.n_hook_called == 0
ctx = multiprocessing.get_context('fork')
p = ctx.Process()
p.start()
p.join()
assert reader.n_hook_called == 1
assert reader.last_caller_pid == p.pid
        # Touch to suppress "unused variable" warning
del dataset
class TestPickleDatasetHelper(unittest.TestCase):
def setUp(self):
self.tempdir = utils.tempdir()
dirpath = self.tempdir.__enter__()
self.path = os.path.join(dirpath, 'test.pkl')
def tearDown(self):
self.tempdir.__exit__(*sys.exc_info())
def test_write_read(self):
with datasets.open_pickle_dataset_writer(self.path) as writer:
writer.write(1)
with datasets.open_pickle_dataset(self.path) as dataset:
assert dataset[0] == 1
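    # _FileReader is expected to close its previous handle and reopen the
    # file when after_fork() is called, so the child gets its own handle.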
def test_file_reader_after_fork(self):
m = mock.mock_open()
with mock.patch('chainer.datasets.pickle_dataset.open', m):
r = pickle_dataset._FileReader(self.path)
m.assert_called_once_with(self.path, 'rb')
m().close.assert_not_called()
m.reset_mock()
r.after_fork()
m.assert_called_once_with(self.path, 'rb')
m().close.assert_called_once_with()
def test_file_reader_picklable(self):
m = mock.mock_open()
with mock.patch('chainer.datasets.pickle_dataset.open', m):
r = pickle_dataset._FileReader(self.path)
m.assert_called_once_with(self.path, 'rb')
m.reset_mock()
pickle.loads(pickle.dumps(r))
m.assert_called_once_with(self.path, 'rb')
testing.run_module(__name__, __file__)
| 3,912 | 26.556338 | 78 | py |
| chainer | chainer-master/tests/chainer_tests/datasets_tests/test_dict_dataset.py |
import unittest
import numpy
from chainer.backends import cuda
from chainer import datasets
from chainer import testing
from chainer.testing import attr
class TestDictDataset(unittest.TestCase):
def setUp(self):
self.x = numpy.random.rand(3, 4)
self.y = numpy.random.rand(3, 5)
self.z = numpy.random.rand(4, 4)
def check_dict_dataset(self, x, y):
dd = datasets.DictDataset(x=x, y=y)
self.assertEqual(len(dd), len(x))
for i in range(len(x)):
example = dd[i]
self.assertIn('x', example)
self.assertIn('y', example)
numpy.testing.assert_array_equal(
cuda.to_cpu(example['x']), cuda.to_cpu(x[i]))
numpy.testing.assert_array_equal(
cuda.to_cpu(example['y']), cuda.to_cpu(y[i]))
example_range = dd[0: len(x)]
for i in range(len(x)):
example = example_range[i]
self.assertIn('x', example)
self.assertIn('y', example)
numpy.testing.assert_array_equal(
cuda.to_cpu(example['x']), cuda.to_cpu(x[i]))
numpy.testing.assert_array_equal(
cuda.to_cpu(example['y']), cuda.to_cpu(y[i]))
def test_dict_dataset_cpu(self):
self.check_dict_dataset(self.x, self.y)
@attr.gpu
def test_dict_dataset_gpu(self):
self.check_dict_dataset(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
def test_dict_dataset_len_mismatch(self):
with self.assertRaises(ValueError):
datasets.DictDataset(x=self.x, z=self.z)
    def test_dict_dataset_overrun(self):
dd = datasets.DictDataset(x=self.x, y=self.y)
with self.assertRaises(IndexError):
dd[3]
testing.run_module(__name__, __file__)
| 1,790 | 28.360656 | 73 | py |
| chainer | chainer-master/tests/chainer_tests/datasets_tests/test_cifar.py |
import os
import unittest
import mock
import numpy
from chainer.dataset import download
from chainer.datasets import get_cifar10
from chainer.datasets import get_cifar100
from chainer.datasets import tuple_dataset
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'withlabel': [True, False],
'ndim': [1, 3],
'scale': [1., 255.]
}))
class TestCifar(unittest.TestCase):
def setUp(self):
self.root = download.get_dataset_directory(
os.path.join('pfnet', 'chainer', 'cifar'))
def tearDown(self):
if hasattr(self, 'cached_file') and os.path.exists(self.cached_file):
os.remove(self.cached_file)
@attr.slow
def test_get_cifar10(self):
self.check_retrieval_once('cifar-10.npz', get_cifar10)
@attr.slow
def test_get_cifar100(self):
self.check_retrieval_once('cifar-100.npz', get_cifar100)
def check_retrieval_once(self, name, retrieval_func):
self.cached_file = os.path.join(self.root, name)
train, test = retrieval_func(withlabel=self.withlabel, ndim=self.ndim,
scale=self.scale)
for cifar_dataset in (train, test):
if self.withlabel:
self.assertIsInstance(cifar_dataset,
tuple_dataset.TupleDataset)
cifar_dataset = cifar_dataset._datasets[0]
else:
self.assertIsInstance(cifar_dataset, numpy.ndarray)
if self.ndim == 1:
self.assertEqual(cifar_dataset.ndim, 2)
else:
# self.ndim == 3
self.assertEqual(cifar_dataset.ndim, 4)
self.assertEqual(cifar_dataset.shape[2],
cifar_dataset.shape[3]) # 32
# test caching - call twice
@attr.slow
def test_get_cifar10_cached(self):
self.check_retrieval_twice('cifar-10.npz', get_cifar10)
@attr.slow
def test_get_cifar100_cached(self):
self.check_retrieval_twice('cifar-100.npz', get_cifar100)
def check_retrieval_twice(self, name, retrieval_func):
self.cached_file = os.path.join(self.root, name)
train, test = retrieval_func(withlabel=self.withlabel, ndim=self.ndim,
scale=self.scale)
with mock.patch('chainer.datasets.cifar.numpy', autospec=True) as \
mnumpy:
train, test = retrieval_func(withlabel=self.withlabel,
ndim=self.ndim,
scale=self.scale)
mnumpy.savez_compressed.assert_not_called() # creator() not called
self.assertEqual(mnumpy.load.call_count, 1)
testing.run_module(__name__, __file__)
| 2,815 | 32.927711 | 78 | py |
| chainer | chainer-master/tests/chainer_tests/datasets_tests/test_transform_dataset.py |
import unittest
import numpy
from chainer import datasets
from chainer import testing
def _create_list_tuples(shape1, shape2, length):
    return [(numpy.random.uniform(size=shape1),
             numpy.random.uniform(size=shape2))
            for _ in range(length)]
@testing.parameterize(
{'dataset': numpy.random.uniform(size=(2, 3, 32, 32))},
{'dataset': _create_list_tuples((3, 32, 32), (32, 32), 5)}
)
class TestTransformDataset(unittest.TestCase):
def setUp(self):
def transform(in_data):
if isinstance(in_data, tuple):
return tuple([example * 3 for example in in_data])
else:
return in_data * 3
self.transform = transform
def test_transform_dataset(self):
td = datasets.TransformDataset(self.dataset, self.transform)
self.assertEqual(len(td), len(self.dataset))
for i in range(len(td)):
example = td[i]
if isinstance(example, tuple):
for j, arr in enumerate(example):
numpy.testing.assert_array_equal(
arr, self.transform(self.dataset[i][j]))
else:
numpy.testing.assert_array_equal(
example, self.transform(self.dataset[i]))
def test_transform_dataset_overrun(self):
td = datasets.TransformDataset(self.dataset, self.transform)
with self.assertRaises(IndexError):
td[len(td) + 1]
testing.run_module(__name__, __file__)
| 1,491 | 29.44898 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/datasets_tests/test_mnist.py |
import importlib
import os
import unittest
import mock
import numpy
from chainer.dataset import download
from chainer.datasets import get_fashion_mnist
from chainer.datasets import get_fashion_mnist_labels
from chainer.datasets import get_kuzushiji_mnist
from chainer.datasets import get_kuzushiji_mnist_labels
from chainer.datasets import get_mnist
from chainer.datasets import tuple_dataset
from chainer import testing
from chainer.testing import attr
_fashion_mnist_labels = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
_kuzushiji_mnist_labels = [('o', u'\u304A'), ('ki', u'\u304D'),
('su', u'\u3059'), ('tsu', u'\u3064'),
('na', u'\u306A'), ('ha', u'\u306F'),
('ma', u'\u307E'), ('ya', u'\u3084'),
('re', u'\u308C'), ('wo', u'\u3092')]
@testing.parameterize(*testing.product({
'withlabel': [True, False],
'ndim': [1, 3],
'scale': [1., 255.],
'rgb_format': [True, False]
}))
class TestMnist(unittest.TestCase):
def setUp(self):
self.mnist_root = download.get_dataset_directory(
os.path.join('pfnet', 'chainer', 'mnist'))
self.kuzushiji_mnist_root = download.get_dataset_directory(
os.path.join('pfnet', 'chainer', 'kuzushiji_mnist'))
self.fashion_mnist_root = download.get_dataset_directory(
os.path.join('pfnet', 'chainer', 'fashion-mnist'))
def tearDown(self):
if (hasattr(self, 'cached_train_file') and
os.path.exists(self.cached_train_file)):
os.remove(self.cached_train_file)
if (hasattr(self, 'cached_test_file') and
os.path.exists(self.cached_test_file)):
os.remove(self.cached_test_file)
@attr.slow
def test_get_mnist(self):
self.check_retrieval_once('train.npz', 'test.npz',
self.mnist_root, get_mnist)
def test_get_kuzushiji_mnist_labels(self):
self.assertEqual(get_kuzushiji_mnist_labels(), _kuzushiji_mnist_labels)
@attr.slow
def test_get_kuzushiji_mnist(self):
self.check_retrieval_once('train.npz', 'test.npz',
self.kuzushiji_mnist_root,
get_kuzushiji_mnist)
def test_get_fashion_mnist_labels(self):
self.assertEqual(get_fashion_mnist_labels(), _fashion_mnist_labels)
@attr.slow
def test_get_fashion_mnist(self):
self.check_retrieval_once('train.npz', 'test.npz',
self.fashion_mnist_root,
get_fashion_mnist)
def check_retrieval_once(self, train_name, test_name, root,
retrieval_func):
self.cached_train_file = os.path.join(root, train_name)
self.cached_test_file = os.path.join(root, test_name)
train, test = retrieval_func(withlabel=self.withlabel,
ndim=self.ndim,
scale=self.scale,
rgb_format=self.rgb_format)
for mnist_dataset in (train, test):
if self.withlabel:
self.assertIsInstance(mnist_dataset,
tuple_dataset.TupleDataset)
mnist_dataset = mnist_dataset._datasets[0]
else:
self.assertIsInstance(mnist_dataset, numpy.ndarray)
if self.ndim == 1:
self.assertEqual(mnist_dataset.ndim, 2)
else:
# self.ndim == 3
self.assertEqual(mnist_dataset.ndim, 4)
                self.assertEqual(mnist_dataset.shape[2],
                                 mnist_dataset.shape[3])  # 28
# test caching - call twice
@attr.slow
def test_get_mnist_cached(self):
self.check_retrieval_twice('train.npz', 'test.npz',
self.mnist_root,
get_mnist,
'chainer.datasets.mnist')
@attr.slow
def test_get_kuzushiji_mnist_cached(self):
self.check_retrieval_twice('train.npz', 'test.npz',
self.kuzushiji_mnist_root,
get_kuzushiji_mnist,
'chainer.datasets.kuzushiji_mnist')
@attr.slow
def test_get_fashion_mnist_cached(self):
self.check_retrieval_twice('train.npz', 'test.npz',
self.fashion_mnist_root,
get_fashion_mnist,
'chainer.datasets.fashion_mnist')
def check_retrieval_twice(self, train_name, test_name, root,
retrieval_func, package):
self.cached_train_file = os.path.join(root, train_name)
self.cached_test_file = os.path.join(root, test_name)
train, test = retrieval_func(withlabel=self.withlabel,
ndim=self.ndim,
scale=self.scale,
rgb_format=self.rgb_format)
numpy = importlib.import_module('numpy', package=package)
with mock.patch.object(numpy, 'savez_compressed') as savez_compressed:
with mock.patch.object(numpy, 'load', wraps=numpy.load) as load:
train, test = retrieval_func(withlabel=self.withlabel,
ndim=self.ndim,
scale=self.scale,
rgb_format=self.rgb_format)
savez_compressed.assert_not_called() # creator() not called
self.assertEqual(load.call_count, 2) # for training and test
testing.run_module(__name__, __file__)
| 5,935 | 40.222222 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/datasets_tests/test_sub_dataset.py |
import unittest
from chainer import datasets
from chainer import testing
class TestSubDataset(unittest.TestCase):
def test_sub_dataset(self):
original = [1, 2, 3, 4, 5]
subset = datasets.SubDataset(original, 1, 4)
self.assertEqual(len(subset), 3)
self.assertEqual(subset[0], 2)
self.assertEqual(subset[1], 3)
self.assertEqual(subset[2], 4)
def test_sub_dataset_overrun(self):
original = [1, 2, 3, 4, 5]
subset = datasets.SubDataset(original, 1, 4)
with self.assertRaises(IndexError):
subset[len(subset)]
def test_permuted_sub_dataset(self):
original = [1, 2, 3, 4, 5]
subset = datasets.SubDataset(original, 1, 4, [2, 0, 3, 1, 4])
self.assertEqual(len(subset), 3)
self.assertEqual(subset[0], 1)
self.assertEqual(subset[1], 4)
self.assertEqual(subset[2], 2)
def test_permuted_sub_dataset_len_mismatch(self):
original = [1, 2, 3, 4, 5]
with self.assertRaises(ValueError):
datasets.SubDataset(original, 1, 4, [2, 0, 3, 1])
class TestSplitDataset(unittest.TestCase):
def test_split_dataset(self):
original = [1, 2, 3, 4, 5]
subset1, subset2 = datasets.split_dataset(original, 2)
self.assertEqual(len(subset1), 2)
self.assertEqual(subset1[0], 1)
self.assertEqual(subset1[1], 2)
self.assertEqual(len(subset2), 3)
self.assertEqual(subset2[0], 3)
self.assertEqual(subset2[1], 4)
self.assertEqual(subset2[2], 5)
def test_split_dataset_head(self):
original = [1, 2, 3, 4, 5]
subset1, subset2 = datasets.split_dataset(original, 0)
self.assertEqual(len(subset1), 0)
self.assertEqual(len(subset2), 5)
def test_split_dataset_tail(self):
original = [1, 2, 3, 4, 5]
subset1, subset2 = datasets.split_dataset(original, 5)
self.assertEqual(len(subset1), 5)
self.assertEqual(len(subset2), 0)
def test_split_dataset_invalid_position(self):
original = [1, 2, 3, 4, 5]
with self.assertRaises(ValueError):
datasets.split_dataset(original, -1)
with self.assertRaises(ValueError):
datasets.split_dataset(original, 6)
def test_split_dataset_invalid_type(self):
original = [1, 2, 3, 4, 5]
with self.assertRaises(TypeError):
datasets.split_dataset(original, 3.5)
def test_permuted_split_dataset(self):
original = [1, 2, 3, 4, 5]
subset1, subset2 = datasets.split_dataset(original, 2, [2, 0, 3, 1, 4])
self.assertEqual(len(subset1), 2)
self.assertEqual(subset1[0], 3)
self.assertEqual(subset1[1], 1)
self.assertEqual(len(subset2), 3)
self.assertEqual(subset2[0], 4)
self.assertEqual(subset2[1], 2)
self.assertEqual(subset2[2], 5)
def test_split_dataset_with_invalid_length_permutation(self):
original = [1, 2, 3, 4, 5]
with self.assertRaises(ValueError):
datasets.split_dataset(original, 2, [2, 0, 3, 1])
with self.assertRaises(ValueError):
datasets.split_dataset(original, 2, [2, 0, 3, 1, 4, 5])
def test_split_dataset_random(self):
original = [1, 2, 3, 4, 5]
subset1, subset2 = datasets.split_dataset_random(original, 2)
reconst = sorted(set(subset1).union(subset2))
self.assertEqual(reconst, original)
subset1a, subset2a = datasets.split_dataset_random(original, 2, seed=3)
reconst = sorted(set(subset1a).union(subset2a))
self.assertEqual(reconst, original)
subset1b, subset2b = datasets.split_dataset_random(original, 2, seed=3)
self.assertEqual(set(subset1a), set(subset1b))
self.assertEqual(set(subset2a), set(subset2b))
reconst = sorted(set(subset1a).union(subset2a))
self.assertEqual(reconst, original)
def test_split_dataset_n(self):
original = list(range(7))
subsets = datasets.split_dataset_n(original, 3)
self.assertEqual(len(subsets), 3)
self.assertEqual(list(subsets[0]), original[:2])
self.assertEqual(list(subsets[1]), original[2:4])
self.assertEqual(list(subsets[2]), original[4:6])
order = list(range(6, -1, -1))
subsets = datasets.split_dataset_n(original, 2, order)
self.assertEqual(len(subsets), 2)
self.assertEqual(list(subsets[0]), [6, 5, 4])
self.assertEqual(list(subsets[1]), [3, 2, 1])
original = list(range(6))
subsets = datasets.split_dataset_n(original, 3)
self.assertEqual(len(subsets), 3)
self.assertEqual(list(subsets[0]), original[:2])
self.assertEqual(list(subsets[1]), original[2:4])
self.assertEqual(list(subsets[2]), original[4:6])
def test_split_dataset_n_random(self):
original = list(range(6))
subsets = datasets.split_dataset_n_random(original, 2)
reconst = sorted(set(subsets[0]).union(subsets[1]))
self.assertEqual(reconst, original)
subsets1 = datasets.split_dataset_n_random(original, 2, seed=3)
reconst = sorted(set(subsets1[0]).union(subsets1[1]))
self.assertEqual(reconst, original)
subsets2 = datasets.split_dataset_n_random(original, 2, seed=3)
self.assertEqual(set(subsets1[0]), set(subsets2[0]))
self.assertEqual(set(subsets1[1]), set(subsets2[1]))
original = list(range(7))
subsets = datasets.split_dataset_n_random(original, 3)
self.assertEqual(len(subsets), 3)
for subset in subsets:
self.assertEqual(len(subset), 2)
reconst = set(subsets[0]).union(subsets[1]).union(subsets[2])
self.assertEqual(len(reconst), 6)
class TestGetCrossValidationDatasets(unittest.TestCase):
def test_get_cross_validation_datasets(self):
original = [1, 2, 3, 4, 5, 6]
cv1, cv2, cv3 = datasets.get_cross_validation_datasets(original, 3)
tr1, te1 = cv1
self.assertEqual(len(tr1), 4)
self.assertEqual(tr1[0], 1)
self.assertEqual(tr1[1], 2)
self.assertEqual(tr1[2], 3)
self.assertEqual(tr1[3], 4)
self.assertEqual(len(te1), 2)
self.assertEqual(te1[0], 5)
self.assertEqual(te1[1], 6)
tr2, te2 = cv2
self.assertEqual(len(tr2), 4)
self.assertEqual(tr2[0], 5)
self.assertEqual(tr2[1], 6)
self.assertEqual(tr2[2], 1)
self.assertEqual(tr2[3], 2)
self.assertEqual(len(te2), 2)
self.assertEqual(te2[0], 3)
self.assertEqual(te2[1], 4)
tr3, te3 = cv3
self.assertEqual(len(tr3), 4)
self.assertEqual(tr3[0], 3)
self.assertEqual(tr3[1], 4)
self.assertEqual(tr3[2], 5)
self.assertEqual(tr3[3], 6)
self.assertEqual(len(te3), 2)
self.assertEqual(te3[0], 1)
self.assertEqual(te3[1], 2)
def test_get_cross_validation_datasets_2(self):
original = [1, 2, 3, 4, 5, 6, 7]
cv1, cv2, cv3 = datasets.get_cross_validation_datasets(original, 3)
tr1, te1 = cv1
self.assertEqual(len(tr1), 4)
self.assertEqual(tr1[0], 1)
self.assertEqual(tr1[1], 2)
self.assertEqual(tr1[2], 3)
self.assertEqual(tr1[3], 4)
self.assertEqual(len(te1), 3)
self.assertEqual(te1[0], 5)
self.assertEqual(te1[1], 6)
self.assertEqual(te1[2], 7)
tr2, te2 = cv2
self.assertEqual(len(tr2), 5)
self.assertEqual(tr2[0], 5)
self.assertEqual(tr2[1], 6)
self.assertEqual(tr2[2], 7)
self.assertEqual(tr2[3], 1)
self.assertEqual(tr2[4], 2)
self.assertEqual(len(te2), 2)
self.assertEqual(te2[0], 3)
self.assertEqual(te2[1], 4)
tr3, te3 = cv3
self.assertEqual(len(tr3), 5)
self.assertEqual(tr3[0], 3)
self.assertEqual(tr3[1], 4)
self.assertEqual(tr3[2], 5)
self.assertEqual(tr3[3], 6)
self.assertEqual(tr3[4], 7)
self.assertEqual(len(te3), 2)
self.assertEqual(te3[0], 1)
self.assertEqual(te3[1], 2)
def test_get_cross_validation_datasets_random(self):
original = [1, 2, 3, 4, 5, 6]
cvs = datasets.get_cross_validation_datasets_random(original, 3)
# check if each split covers the whole dataset
for tr, te in cvs:
reconst = sorted(set(tr).union(te))
self.assertEqual(reconst, original)
self.assertEqual(len(tr) + len(te), len(original))
# check if all validation sets cover the whole dataset
validation_union = sorted(
list(cvs[0][1]) + list(cvs[1][1]) + list(cvs[2][1]))
self.assertEqual(validation_union, original)
cvs_a = datasets.get_cross_validation_datasets_random(
original, 3, seed=5)
cvs_b = datasets.get_cross_validation_datasets_random(
original, 3, seed=5)
for (tr_a, te_a), (tr_b, te_b) in zip(cvs_a, cvs_b):
self.assertEqual(set(tr_a), set(tr_b))
self.assertEqual(set(te_a), set(te_b))
testing.run_module(__name__, __file__)
| 9,204 | 36.267206 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/datasets_tests/test_image_dataset.py |
import os
import pickle
import unittest
import numpy
from chainer import datasets
from chainer.datasets import image_dataset
from chainer import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.int32],
}))
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestImageDataset(unittest.TestCase):
def setUp(self):
root = os.path.join(os.path.dirname(__file__), 'image_dataset')
path = os.path.join(root, 'img.lst')
self.dataset = datasets.ImageDataset(path, root=root, dtype=self.dtype)
def test_len(self):
self.assertEqual(len(self.dataset), 2)
def test_get(self):
img = self.dataset.get_example(0)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (4, 300, 300))
def test_get_grey(self):
img = self.dataset.get_example(1)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (1, 300, 300))
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.int32],
'label_dtype': [numpy.float32, numpy.int32],
}))
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestLabeledImageDataset(unittest.TestCase):
def setUp(self):
root = os.path.join(os.path.dirname(__file__), 'image_dataset')
path = os.path.join(root, 'labeled_img.lst')
self.dataset = datasets.LabeledImageDataset(
path, root=root, dtype=self.dtype, label_dtype=self.label_dtype)
def test_len(self):
self.assertEqual(len(self.dataset), 2)
def test_get(self):
img, label = self.dataset.get_example(0)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (4, 300, 300))
self.assertEqual(label.dtype, self.label_dtype)
self.assertEqual(label, 0)
def test_get_grey(self):
img, label = self.dataset.get_example(1)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (1, 300, 300))
self.assertEqual(label.dtype, self.label_dtype)
self.assertEqual(label, 1)
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestLabeledImageDatasetInvalidFormat(unittest.TestCase):
def test_invalid_column(self):
root = os.path.join(os.path.dirname(__file__), 'image_dataset')
path = os.path.join(root, 'img.lst')
with self.assertRaises(ValueError):
datasets.LabeledImageDataset(path)
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.int32],
}))
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestZippedImageDataset(unittest.TestCase):
def setUp(self):
root = os.path.join(os.path.dirname(__file__), 'image_dataset')
zipfilename = os.path.join(root, 'zipped_images_1.zip')
self.dataset = datasets.ZippedImageDataset(zipfilename,
dtype=self.dtype)
def test_len(self):
self.assertEqual(len(self.dataset), 2)
def test_get(self):
img = self.dataset.get_example(0)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (4, 300, 300))
def test_get_grey(self):
img = self.dataset.get_example(1)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (1, 300, 300))
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.int32],
}))
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestMultiZippedImageDataset(unittest.TestCase):
def setUp(self):
root = os.path.join(os.path.dirname(__file__), 'image_dataset')
zipfilenames = [os.path.join(root, fn) for fn
in ('zipped_images_1.zip', 'zipped_images_2.zip')]
self.dataset = datasets.MultiZippedImageDataset(zipfilenames,
dtype=self.dtype)
def test_len(self):
self.assertEqual(len(self.dataset), 5)
def _get_check(self, ds):
image_formats = ((4, 300, 300), (1, 300, 300), (4, 285, 1000),
(3, 404, 1417), (4, 404, 1417))
for i in range(5):
fmt = image_formats[i]
img = ds.get_example(i)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, fmt)
def test_get(self):
self._get_check(self.dataset)
def test_pickle_unpickle(self):
dss = pickle.dumps(self.dataset)
ds = pickle.loads(dss)
self._get_check(ds)
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.int32],
'label_dtype': [numpy.float32, numpy.int32],
}))
@unittest.skipUnless(image_dataset.available, 'image_dataset is not available')
class TestLabeledZippedImageDataset(unittest.TestCase):
def setUp(self):
root = os.path.join(os.path.dirname(__file__), 'image_dataset')
zipfilename = os.path.join(root, 'zipped_images_1.zip')
labelfilename = os.path.join(root, 'labeled_img.lst')
self.dataset = datasets.LabeledZippedImageDataset(
zipfilename, labelfilename, dtype=self.dtype,
label_dtype=self.label_dtype)
def test_len(self):
self.assertEqual(len(self.dataset), 2)
def test_get(self):
img, label = self.dataset.get_example(0)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (4, 300, 300))
self.assertEqual(label.dtype, self.label_dtype)
self.assertEqual(label, 0)
def test_get_gray(self):
img, label = self.dataset.get_example(1)
self.assertEqual(img.dtype, self.dtype)
self.assertEqual(img.shape, (1, 300, 300))
self.assertEqual(label.dtype, self.label_dtype)
self.assertEqual(label, 1)
testing.run_module(__name__, __file__)
| 5,975 | 33.148571 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/datasets_tests/__init__.py | | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/datasets_tests/test_svhn.py |
import os
import unittest
import mock
import numpy
from chainer.dataset import download
from chainer.datasets import get_svhn
from chainer.datasets import tuple_dataset
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'withlabel': [True, False],
'scale': [1., 255.]
}))
class TestSvhn(unittest.TestCase):
def setUp(self):
self.root = download.get_dataset_directory(
os.path.join('pfnet', 'chainer', 'svhn'))
def tearDown(self):
if hasattr(self, 'cached_files'):
for file in self.cached_files:
if os.path.exists(file):
os.remove(file)
@attr.slow
def test_get_svhn(self):
self.check_retrieval_once(['train.npz', 'test.npz'], get_svhn)
def check_retrieval_once(self, names, retrieval_func):
self.cached_files = [os.path.join(self.root, name) for name in names]
train, test = retrieval_func(withlabel=self.withlabel,
scale=self.scale)
for svhn_dataset in (train, test):
if self.withlabel:
self.assertIsInstance(svhn_dataset,
tuple_dataset.TupleDataset)
svhn_dataset = svhn_dataset._datasets[0]
else:
self.assertIsInstance(svhn_dataset, numpy.ndarray)
self.assertEqual(svhn_dataset.ndim, 4)
self.assertEqual(svhn_dataset.shape[2], svhn_dataset.shape[3])
# test caching - call twice
@attr.slow
def test_get_svhn_cached(self):
self.check_retrieval_twice(['train.npz', 'test.npz'], get_svhn)
def check_retrieval_twice(self, names, retrieval_func):
self.cached_files = [os.path.join(self.root, name) for name in names]
train, test = retrieval_func(withlabel=self.withlabel,
scale=self.scale)
with mock.patch('chainer.datasets.svhn.numpy', autospec=True) as \
mnumpy:
train, test = retrieval_func(withlabel=self.withlabel,
scale=self.scale)
mnumpy.savez_compressed.assert_not_called() # creator() not called
self.assertEqual(mnumpy.load.call_count, 2)
testing.run_module(__name__, __file__)
| 2,326 | 33.220588 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/datasets_tests/test_concatenated_dataset.py |
import unittest
import numpy as np
import six
from chainer.datasets import ConcatenatedDataset
from chainer import testing
@testing.parameterize(
# basic usage
{'datasets': (
np.random.uniform(size=(5, 3, 48, 32)),
np.random.uniform(size=(15, 3, 64, 48)),
)},
# more than two datasets
{'datasets': (
np.random.uniform(size=(5, 3, 48, 32)),
np.random.uniform(size=(15, 3, 16, 48)),
np.random.uniform(size=(20, 3, 5, 5)),
)},
# single dataset
{'datasets': (
np.random.uniform(size=(5, 3, 48, 32)),
)},
# no dataset
{'datasets': ()},
# some datasets are empty
{'datasets': (
np.random.uniform(size=(5, 3, 48, 32)),
[],
np.random.uniform(size=(20, 3, 5, 5)),
[],
)},
# all datasets are empty
{'datasets': ([], [], [])},
)
class TestConcatenatedDataset(unittest.TestCase):
def setUp(self):
self.concatenated_dataset = ConcatenatedDataset(*self.datasets)
self.expected_dataset = [
sample for dataset in self.datasets for sample in dataset]
def test_concatenated_dataset(self):
self.assertEqual(
len(self.concatenated_dataset), len(self.expected_dataset))
for i, expected in enumerate(self.expected_dataset):
np.testing.assert_equal(self.concatenated_dataset[i], expected)
def test_concatenated_dataset_slice(self):
concatenated_slice = self.concatenated_dataset[1:8:2]
        expected_slice = self.expected_dataset[1:8:2]
self.assertEqual(
len(concatenated_slice), len(expected_slice))
for concatenated, expected in six.moves.zip(
concatenated_slice, expected_slice):
np.testing.assert_equal(concatenated, expected)
testing.run_module(__name__, __file__)
| 1,854 | 27.538462 | 75 | py |
| chainer | chainer-master/tests/chainer_tests/datasets_tests/image_dataset/__init__.py | | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/training_tests/test_trainer.py |
import time
import traceback
import unittest
from chainer import testing
from chainer import training
class DummyExtension(training.extension.Extension):
def __init__(self, test_case):
self.is_called = False
self.is_finalized = False
self._test_case = test_case
def __call__(self, trainer):
self._test_case.assertTrue(trainer.is_initialized)
self.is_called = True
def finalize(self):
self.is_finalized = True
def initialize(self, trainer):
trainer.is_initialized = True
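# Extension whose on_error hook records that it was invoked when another
# extension raises during Trainer.run().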
class ErrorHandlingExtension(training.extension.Extension):
def __init__(self):
self.is_error_handled = False
def __call__(self, trainer):
pass
def on_error(self, trainer, exception, tb):
traceback.print_tb(tb)
self.is_error_handled = True
def finalize(self):
pass
def initialize(self, trainer):
pass
class TheOnlyError(Exception):
pass
class DummyCallableClass(object):
def __init__(self, test_case):
self.name = 'DummyCallableClass'
self.is_called = False
self.is_finalized = False
self._test_case = test_case
def __call__(self, trainer):
self._test_case.assertTrue(trainer.is_initialized)
self.is_called = True
def finalize(self):
self.is_finalized = True
def initialize(self, trainer):
trainer.is_initialized = True
class DummyClass(object):
def __init__(self):
self.is_touched = False
def touch(self):
self.is_touched = True
class TestTrainer(unittest.TestCase):
def setUp(self):
self.trainer = self._create_mock_trainer(10)
self.trainer.is_initialized = False
def _create_mock_trainer(self, iterations):
trainer = testing.get_trainer_with_mock_updater(
(iterations, 'iteration'))
trainer.updater.update_core = lambda: time.sleep(0.001)
return trainer
def test_elapsed_time(self):
with self.assertRaises(RuntimeError):
self.trainer.elapsed_time
self.trainer.run()
self.assertGreater(self.trainer.elapsed_time, 0)
def test_elapsed_time_serialization(self):
self.trainer.run()
serialized_time = self.trainer.elapsed_time
new_trainer = self._create_mock_trainer(5)
testing.save_and_load_npz(self.trainer, new_trainer)
new_trainer.run()
self.assertGreater(new_trainer.elapsed_time, serialized_time)
def test_add_inherit_class_extension(self):
dummy_extension = DummyExtension(self)
self.trainer.extend(dummy_extension)
self.trainer.run()
self.assertTrue(dummy_extension.is_called)
self.assertTrue(dummy_extension.is_finalized)
def test_add_callable_class_extension(self):
dummy_callable_class = DummyCallableClass(self)
self.trainer.extend(dummy_callable_class)
self.trainer.run()
self.assertTrue(dummy_callable_class.is_called)
self.assertTrue(dummy_callable_class.is_finalized)
def test_add_called_before_training_extension(self):
class MyDummyCallableClass(DummyCallableClass):
def __init__(self, test_case):
super(MyDummyCallableClass, self).__init__(test_case)
self.is_called_before_training = False
def __call__(self, trainer):
if trainer.is_before_training:
self.is_called_before_training = True
return super(MyDummyCallableClass, self).__call__(trainer)
dummy_callable_class = MyDummyCallableClass(self)
self.trainer.extend(dummy_callable_class, call_before_training=True)
self.trainer.run()
self.assertTrue(dummy_callable_class.is_called)
self.assertTrue(dummy_callable_class.is_called_before_training)
self.assertTrue(dummy_callable_class.is_finalized)
def test_add_lambda_extension(self):
dummy_class = DummyClass()
self.trainer.extend(lambda x: dummy_class.touch())
self.trainer.run()
self.assertTrue(dummy_class.is_touched)
def test_add_make_extension(self):
self.is_called = False
@training.make_extension()
def dummy_extension(trainer):
self.is_called = True
self.trainer.extend(dummy_extension)
self.trainer.run()
self.assertTrue(self.is_called)
def test_add_make_extension_with_initializer(self):
self.is_called = False
def initializer(trainer):
trainer.is_initialized = True
@training.make_extension(initializer=initializer)
def dummy_extension(trainer):
self.assertTrue(trainer.is_initialized)
self.is_called = True
self.trainer.extend(dummy_extension)
self.trainer.run()
self.assertTrue(self.is_called)
def test_add_function_extension(self):
self.is_called = False
def dummy_function(trainer):
self.is_called = True
self.trainer.extend(dummy_function)
self.trainer.run()
self.assertTrue(self.is_called)
def test_add_two_extensions_default_priority(self):
self.called_order = []
@training.make_extension(trigger=(1, 'epoch'))
def dummy_extension_1(trainer):
self.called_order.append(1)
@training.make_extension(trigger=(1, 'epoch'))
def dummy_extension_2(trainer):
self.called_order.append(2)
self.trainer.extend(dummy_extension_1)
self.trainer.extend(dummy_extension_2)
self.trainer.run()
self.assertEqual(self.called_order, [1, 2])
def test_add_two_extensions_specific_priority(self):
self.called_order = []
@training.make_extension(trigger=(1, 'epoch'), priority=50)
def dummy_extension_1(trainer):
self.called_order.append(1)
@training.make_extension(trigger=(1, 'epoch'), priority=100)
def dummy_extension_2(trainer):
self.called_order.append(2)
self.trainer.extend(dummy_extension_1)
self.trainer.extend(dummy_extension_2)
self.trainer.run()
self.assertEqual(self.called_order, [2, 1])
def test_exception_handler(self):
ext = ErrorHandlingExtension()
self.trainer.extend(ext, trigger=(1, 'iteration'), priority=1)
self.assertFalse(ext.is_error_handled)
d = {}
def exception_handler(trainer, exp, tb):
d['called'] = True
@training.make_extension(trigger=(1, 'iteration'), priority=100,
on_error=exception_handler)
def exception_raiser(trainer):
raise TheOnlyError()
self.trainer.extend(exception_raiser)
dummy_extension = DummyExtension(self)
self.trainer.extend(dummy_extension)
with self.assertRaises(TheOnlyError):
self.trainer.run()
self.assertTrue(d['called'])
self.assertTrue(ext.is_error_handled)
self.assertTrue(dummy_extension.is_finalized)
def test_exception_in_exception_handler(self):
ext = ErrorHandlingExtension()
self.trainer.extend(ext, trigger=(1, 'iteration'), priority=1)
self.assertFalse(ext.is_error_handled)
def exception_handler(trainer, exp, tb):
raise ValueError('hogehoge from exception handler')
@training.make_extension(trigger=(1, 'iteration'), priority=100,
on_error=exception_handler)
def exception_raiser(trainer):
raise TheOnlyError()
self.trainer.extend(exception_raiser)
dummy_extension = DummyExtension(self)
self.trainer.extend(dummy_extension)
with self.assertRaises(TheOnlyError):
self.trainer.run()
self.assertTrue(ext.is_error_handled)
self.assertTrue(dummy_extension.is_finalized)
testing.run_module(__name__, __file__)
| 7,997 | 28.843284 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/training_tests/test_extension.py |
import unittest
import pytest
from chainer import testing
from chainer import training
class TestExtension(unittest.TestCase):
def test_raise_error_if_call_not_implemented(self):
class MyExtension(training.Extension):
pass
ext = MyExtension()
trainer = testing.get_trainer_with_mock_updater()
with pytest.raises(NotImplementedError):
ext(trainer)
def test_default_name(self):
class MyExtension(training.Extension):
pass
ext = MyExtension()
self.assertEqual(ext.default_name, 'MyExtension')
def test_deleted_invoke_before_training(self):
class MyExtension(training.Extension):
pass
ext = MyExtension()
with self.assertRaises(AttributeError):
ext.invoke_before_training
def test_make_extension(self):
def initialize(trainer):
pass
@training.make_extension(trigger=(2, 'epoch'), default_name='my_ext',
priority=50, initializer=initialize)
def my_extension(trainer):
pass
self.assertEqual(my_extension.trigger, (2, 'epoch'))
self.assertEqual(my_extension.default_name, 'my_ext')
self.assertEqual(my_extension.priority, 50)
self.assertIs(my_extension.initialize, initialize)
def test_make_extension_default_values(self):
@training.make_extension()
def my_extension(trainer):
pass
self.assertEqual(my_extension.trigger, (1, 'iteration'))
self.assertEqual(my_extension.default_name, 'my_extension')
self.assertEqual(my_extension.priority, training.PRIORITY_READER)
self.assertIsNone(my_extension.initialize)
def test_make_extension_deleted_argument(self):
with self.assertRaises(ValueError):
@training.make_extension(invoke_before_training=False)
def my_extension(_):
pass
def test_make_extension_unexpected_kwargs(self):
with self.assertRaises(TypeError):
@training.make_extension(foo=1)
def my_extension(_):
pass
testing.run_module(__name__, __file__)
| 2,203 | 29.191781 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/training_tests/__init__.py | | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_plot_report.py |
import unittest
import warnings
import pytest
from chainer import testing
from chainer.training import extensions
try:
import matplotlib
_available = True
except ImportError:
_available = False
class TestPlotReport(unittest.TestCase):
def test_available(self):
if _available:
self.assertTrue(extensions.PlotReport.available())
else:
            # It shows a warning only when matplotlib is not available
with pytest.warns(UserWarning):
self.assertFalse(extensions.PlotReport.available())
    # In the following we explicitly use plot_report._available instead of
    # PlotReport.available() because in some cases `test_available()` fails:
    # it sometimes does not raise UserWarning even though matplotlib is not
    # installed (this is due to the difference between the behavior of
    # unittest in python2 and that in python3).
@unittest.skipUnless(_available, 'matplotlib is not installed')
def test_lazy_import(self):
# matplotlib.pyplot should be lazily imported because matplotlib.use
# has to be called earlier.
# To support python2, we do not use self.assertWarns()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
matplotlib.use('Agg')
# Test again with a different backend, because the above does not
# generate a warning if matplotlib.use('Agg') is called and then
# matplotlib.pyplot is imported.
matplotlib.use('PS')
self.assertEqual(len(w), 0)
testing.run_module(__name__, __file__)
| 1,638 | 31.78 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_fail_on_nonnumber.py |
import os
import shutil
import tempfile
import unittest
import warnings
import numpy
import chainer
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer import training
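# A minimal chain with a single Linear link; the tests below inject NaN/Inf
# into its weights to trigger FailOnNonNumber.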
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
with self.init_scope():
self.l = links.Linear(1, 3)
def forward(self, x):
return self.l(x)
class Dataset(chainer.dataset.DatasetMixin):
def __init__(self, values):
self.values = values
def __len__(self):
return len(self.values)
def get_example(self, i):
return numpy.array([self.values[i]], numpy.float32), numpy.int32(i % 2)
class TestFailOnNonNumber(unittest.TestCase):
def setUp(self):
self.n_data = 4
self.n_epochs = 3
self.model = Model()
self.classifier = links.Classifier(self.model)
self.optimizer = chainer.optimizers.Adam()
self.optimizer.setup(self.classifier)
self.dataset = Dataset([i for i in range(self.n_data)])
self.iterator = chainer.iterators.SerialIterator(
self.dataset, 1, shuffle=False)
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.temp_dir)
def prepare(self, dirname='test', device=None):
outdir = os.path.join(self.temp_dir, dirname)
self.updater = training.updaters.StandardUpdater(
self.iterator, self.optimizer, device=device)
self.trainer = training.Trainer(
self.updater, (self.n_epochs, 'epoch'), out=outdir)
self.trainer.extend(training.extensions.FailOnNonNumber())
def test_trainer(self):
self.prepare(dirname='test_trainer')
self.trainer.run()
def test_nan(self):
self.prepare(dirname='test_nan')
self.model.l.W.array[1, 0] = numpy.nan
with self.assertRaises(RuntimeError):
self.trainer.run(show_loop_exception_msg=False)
def test_inf(self):
self.prepare(dirname='test_inf')
self.model.l.W.array[2, 0] = numpy.inf
# Ignore RuntimeWarning when using Adam on CPU
with warnings.catch_warnings(), self.assertRaises(RuntimeError):
warnings.filterwarnings('ignore', category=RuntimeWarning)
self.trainer.run(show_loop_exception_msg=False)
@attr.gpu
def test_trainer_gpu(self):
self.prepare(dirname='test_trainer_gpu', device=0)
self.trainer.run()
@attr.gpu
def test_nan_gpu(self):
self.prepare(dirname='test_nan_gpu', device=0)
self.model.l.W.array[:] = numpy.nan
with self.assertRaises(RuntimeError):
self.trainer.run(show_loop_exception_msg=False)
@attr.gpu
def test_inf_gpu(self):
self.prepare(dirname='test_inf_gpu', device=0)
self.model.l.W.array[:] = numpy.inf
with self.assertRaises(RuntimeError):
self.trainer.run(show_loop_exception_msg=False)
testing.run_module(__name__, __file__)
| 3,042 | 27.980952 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_linear_shift.py |
import unittest
import mock
from chainer import testing
from chainer import training
from chainer.training import extensions
class TestLinearShift(unittest.TestCase):
value_range = (2.0, 6.0)
time_range = (1, 3)
expect = [2.0, 2.0, 2.0, 2.0, 4.0, 4.0, 6.0, 6.0, 6.0, 6.0]
def setUp(self):
self.optimizer = mock.MagicMock()
self.extension = extensions.LinearShift(
'x', self.value_range, self.time_range, self.optimizer)
self.interval = 2
self.trigger = training.get_trigger((self.interval, 'iteration'))
self.trainer = testing.get_trainer_with_mock_updater(self.trigger)
self.trainer.updater.get_optimizer.return_value = self.optimizer
def _run_trainer(self, extension, expect, optimizer=None):
if optimizer is None:
optimizer = self.optimizer
extension.initialize(self.trainer)
actual = []
for _ in expect:
self.trainer.updater.update()
actual.append(optimizer.x)
if self.trigger(self.trainer):
extension(self.trainer)
self.assertEqual(actual, expect)
def test_basic(self):
self.optimizer.x = 0
extension = extensions.LinearShift(
'x', self.value_range, self.time_range)
self._run_trainer(extension, self.expect)
def test_with_optimizer(self):
optimizer = mock.Mock()
optimizer.x = 0
extension = extensions.LinearShift(
'x', self.value_range, self.time_range, optimizer)
self._run_trainer(extension, self.expect, optimizer)
def test_resume(self):
new_optimizer = mock.Mock()
new_extension = extensions.LinearShift(
'x', self.value_range, self.time_range, new_optimizer)
self.trainer.extend(self.extension)
self.trainer.run()
new_trainer = testing.get_trainer_with_mock_updater((5, 'iteration'))
new_trainer.extend(new_extension)
testing.save_and_load_npz(self.trainer, new_trainer)
new_extension.initialize(new_trainer)
self.assertEqual(new_optimizer.x, self.optimizer.x)
self.assertIsInstance(new_optimizer.x, float)
testing.run_module(__name__, __file__)
| 2,245 | 30.194444 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_print_report.py |
import tempfile
import unittest
import mock
from chainer import testing
from chainer.training import extensions
class TestPrintReport(unittest.TestCase):
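    # _setup wires a PrintReport to a mocked LogReport and an output stream
    # (a MagicMock by default, optionally with its flush attribute removed).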
def _setup(self, stream=None, delete_flush=False):
self.logreport = mock.MagicMock(spec=extensions.LogReport(
['epoch'], trigger=(1, 'iteration'), log_name=None))
if stream is None:
self.stream = mock.MagicMock()
if delete_flush:
del self.stream.flush
else:
self.stream = stream
self.report = extensions.PrintReport(
['epoch'], log_report=self.logreport, out=self.stream)
self.trainer = testing.get_trainer_with_mock_updater(
stop_trigger=(1, 'iteration'))
self.trainer.extend(self.logreport)
self.trainer.extend(self.report)
self.logreport.log = [{'epoch': 0}]
def test_stream_with_flush_is_flushed(self):
self._setup(delete_flush=False)
self.assertTrue(hasattr(self.stream, 'flush'))
self.stream.flush.assert_not_called()
self.report(self.trainer)
self.stream.flush.assert_called_with()
def test_stream_without_flush_raises_no_exception(self):
self._setup(delete_flush=True)
self.assertFalse(hasattr(self.stream, 'flush'))
self.report(self.trainer)
def test_real_stream_raises_no_exception(self):
with tempfile.TemporaryFile(mode='w') as stream:
self._setup(stream=stream)
self.report(self.trainer)
testing.run_module(__name__, __file__)
| 1,568 | 31.6875 | 66 | py |
| chainer | chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_evaluator.py |
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import _cpu
from chainer import dataset
from chainer import iterators
from chainer import testing
from chainer.training import extensions
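# Chain that records every input it receives and reports the input's sum
# as 'loss'.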
class DummyModel(chainer.Chain):
def __init__(self, test):
super(DummyModel, self).__init__()
self.args = []
self.test = test
def forward(self, x):
self.args.append(x)
chainer.report({'loss': x.sum()}, self)
class DummyModelTwoArgs(chainer.Chain):
def __init__(self, test):
super(DummyModelTwoArgs, self).__init__()
self.args = []
self.test = test
def forward(self, x, y):
self.args.append((x, y))
with chainer.using_device(backend.get_device_from_array(x, y)):
chainer.report({'loss': x.sum() + y.sum()}, self)
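# Iterator stub over fixed return values; records whether finalize() was
# called so tests can verify the evaluator cleans it up.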
class DummyIterator(dataset.Iterator):
def __init__(self, return_values):
self.iterator = iter(return_values)
self.finalized = False
self.return_values = return_values
def reset(self):
self.iterator = iter(self.return_values)
def __next__(self):
return next(self.iterator)
def finalize(self):
self.finalized = True
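# Converter stub that records each (batch, device) pair it is called with
# and returns pre-built batches instead of converting anything.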
class DummyConverter(object):
def __init__(self, return_values):
self.args = []
self.iterator = iter(return_values)
def __call__(self, batch, device):
self.args.append({'batch': batch, 'device': device})
return next(self.iterator)
class TestEvaluator(unittest.TestCase):
def setUp(self):
self.data = [
numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)]
self.batches = [
numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
for _ in range(2)]
self.iterator = DummyIterator(self.data)
self.converter = DummyConverter(self.batches)
self.target = DummyModel(self)
self.evaluator = extensions.Evaluator(
self.iterator, self.target, converter=self.converter)
self.expect_mean = numpy.mean([numpy.sum(x) for x in self.batches])
def test_evaluate(self):
reporter = chainer.Reporter()
reporter.add_observer('target', self.target)
with reporter:
mean = self.evaluator.evaluate()
        # No observation is reported to the current reporter. Instead the
        # evaluator collects results in order to calculate their mean.
self.assertEqual(len(reporter.observation), 0)
# The converter gets results of the iterator.
self.assertEqual(len(self.converter.args), len(self.data))
for i in range(len(self.data)):
numpy.testing.assert_array_equal(
self.converter.args[i]['batch'], self.data[i])
self.assertIsNone(self.converter.args[i]['device'])
# The model gets results of converter.
self.assertEqual(len(self.target.args), len(self.batches))
for i in range(len(self.batches)):
numpy.testing.assert_array_equal(
self.target.args[i], self.batches[i])
self.assertAlmostEqual(mean['target/loss'], self.expect_mean, places=4)
self.evaluator.finalize()
self.assertTrue(self.iterator.finalized)
def test_call(self):
mean = self.evaluator()
# 'main' is used by default
self.assertAlmostEqual(mean['main/loss'], self.expect_mean, places=4)
def test_evaluator_name(self):
self.evaluator.name = 'eval'
mean = self.evaluator()
# name is used as a prefix
self.assertAlmostEqual(
mean['eval/main/loss'], self.expect_mean, places=4)
def test_current_report(self):
reporter = chainer.Reporter()
with reporter:
mean = self.evaluator()
# The result is reported to the current reporter.
self.assertEqual(reporter.observation, mean)
@chainer.testing.backend.inject_backend_tests(
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# Custom converter is not supported for ChainerX.
])
class TestEvaluatorTupleData(unittest.TestCase):
def setUp(self):
self.data = [
numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)]
self.batches = [
(numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f'),
numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f'))
for _ in range(2)]
def prepare(self, data, batches, device):
iterator = DummyIterator(data)
converter = DummyConverter(batches)
target = DummyModelTwoArgs(self)
evaluator = extensions.Evaluator(
iterator, target, converter=converter, device=device)
return iterator, converter, target, evaluator
def test_evaluate(self, backend_config):
data = backend_config.get_array(self.data)
batches = [backend_config.get_array(b) for b in self.batches]
device = backend_config.device
iterator, converter, target, evaluator = (
self.prepare(data, batches, device))
reporter = chainer.Reporter()
reporter.add_observer('target', target)
with reporter:
mean = evaluator.evaluate()
# The converter gets results of the iterator and the device number.
self.assertEqual(len(converter.args), len(data))
if backend_config.use_cuda:
expected_device_arg = backend_config.cuda_device
else:
expected_device_arg = -1
for i in range(len(data)):
numpy.testing.assert_array_equal(
_cpu._to_cpu(converter.args[i]['batch']), self.data[i])
self.assertEqual(converter.args[i]['device'], expected_device_arg)
# The model gets results of converter.
self.assertEqual(len(target.args), len(batches))
for i in range(len(batches)):
numpy.testing.assert_array_equal(
_cpu._to_cpu(target.args[i]), self.batches[i])
expect_mean = numpy.mean([numpy.sum(x) for x in self.batches])
self.assertAlmostEqual(
_cpu._to_cpu(mean['target/loss']), expect_mean, places=4)
class TestEvaluatorDictData(unittest.TestCase):
def setUp(self):
self.data = range(2)
self.batches = [
{'x': numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f'),
'y': numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')}
for _ in range(2)]
self.iterator = DummyIterator(self.data)
self.converter = DummyConverter(self.batches)
self.target = DummyModelTwoArgs(self)
self.evaluator = extensions.Evaluator(
self.iterator, self.target, converter=self.converter)
def test_evaluate(self):
reporter = chainer.Reporter()
reporter.add_observer('target', self.target)
with reporter:
mean = self.evaluator.evaluate()
# The model gets results of converter.
self.assertEqual(len(self.target.args), len(self.batches))
for i in range(len(self.batches)):
numpy.testing.assert_array_equal(
self.target.args[i][0], self.batches[i]['x'])
numpy.testing.assert_array_equal(
self.target.args[i][1], self.batches[i]['y'])
expect_mean = numpy.mean(
[numpy.sum(x['x']) + numpy.sum(x['y']) for x in self.batches])
self.assertAlmostEqual(mean['target/loss'], expect_mean, places=4)
class TestEvaluatorWithEvalFunc(unittest.TestCase):
def setUp(self):
self.data = [
numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)]
self.batches = [
numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
for _ in range(2)]
self.iterator = DummyIterator(self.data)
self.converter = DummyConverter(self.batches)
self.target = DummyModel(self)
self.evaluator = extensions.Evaluator(
self.iterator, {}, converter=self.converter,
eval_func=self.target)
def test_evaluate(self):
reporter = chainer.Reporter()
reporter.add_observer('target', self.target)
with reporter:
self.evaluator.evaluate()
# The model gets results of converter.
self.assertEqual(len(self.target.args), len(self.batches))
for i in range(len(self.batches)):
numpy.testing.assert_array_equal(
self.target.args[i], self.batches[i])
@testing.parameterize(*testing.product({
'repeat': [True, False],
'iterator_class': [iterators.SerialIterator,
iterators.MultiprocessIterator,
iterators.MultithreadIterator]
}))
class TestEvaluatorRepeat(unittest.TestCase):
def test_user_warning(self):
dataset = numpy.ones((4, 6))
iterator = self.iterator_class(dataset, 2, repeat=self.repeat)
if self.repeat:
with testing.assert_warns(UserWarning):
extensions.Evaluator(iterator, {})
class TestEvaluatorProgressBar(unittest.TestCase):
def setUp(self):
self.data = [
numpy.random.uniform(-1, 1, (3, 4)).astype('f') for _ in range(2)]
self.iterator = iterators.SerialIterator(
self.data, 1, repeat=False, shuffle=False)
self.target = DummyModel(self)
self.evaluator = extensions.Evaluator(
self.iterator, {}, eval_func=self.target, progress_bar=True)
def test_evaluator(self):
reporter = chainer.Reporter()
reporter.add_observer('target', self.target)
with reporter:
self.evaluator.evaluate()
testing.run_module(__name__, __file__)
| 9,832 | 32.332203 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_snapshot.py |
import glob
import itertools
import os
import shutil
import tempfile
import time
import unittest
import mock
import pytest
from chainer import testing
from chainer import training
from chainer.training import extensions
from chainer.training.extensions._snapshot import _find_latest_snapshot
from chainer.training.extensions._snapshot import _find_snapshot_files
from chainer.training.extensions._snapshot import _find_stale_snapshots
class TestSnapshot(unittest.TestCase):
def test_call(self):
t = mock.MagicMock()
c = mock.MagicMock(side_effect=[True, False])
w = mock.MagicMock()
snapshot = extensions.snapshot(target=t, condition=c, writer=w)
trainer = mock.MagicMock()
snapshot(trainer)
snapshot(trainer)
assert c.call_count == 2
assert w.call_count == 1
def test_savefun_and_writer_exclusive(self):
# savefun and writer arguments cannot be specified together.
def savefun(*args, **kwargs):
assert False
writer = extensions.snapshot_writers.SimpleWriter()
with pytest.raises(TypeError):
extensions.snapshot(savefun=savefun, writer=writer)
trainer = mock.MagicMock()
with pytest.raises(TypeError):
extensions.snapshot_object(trainer, savefun=savefun, writer=writer)
class TestSnapshotSaveFile(unittest.TestCase):
def setUp(self):
self.trainer = testing.get_trainer_with_mock_updater()
self.trainer.out = '.'
self.trainer._done = True
def tearDown(self):
if os.path.exists('myfile.dat'):
os.remove('myfile.dat')
def test_save_file(self):
w = extensions.snapshot_writers.SimpleWriter()
snapshot = extensions.snapshot_object(self.trainer, 'myfile.dat',
writer=w)
snapshot(self.trainer)
self.assertTrue(os.path.exists('myfile.dat'))
def test_clean_up_tempdir(self):
snapshot = extensions.snapshot_object(self.trainer, 'myfile.dat')
snapshot(self.trainer)
left_tmps = [fn for fn in os.listdir('.')
if fn.startswith('tmpmyfile.dat')]
self.assertEqual(len(left_tmps), 0)
class TestSnapshotOnError(unittest.TestCase):
def setUp(self):
self.trainer = testing.get_trainer_with_mock_updater()
self.trainer.out = '.'
self.filename = 'myfile-deadbeef.dat'
def tearDown(self):
if os.path.exists(self.filename):
os.remove(self.filename)
def test_on_error(self):
class TheOnlyError(Exception):
pass
@training.make_extension(trigger=(1, 'iteration'), priority=100)
def exception_raiser(trainer):
raise TheOnlyError()
self.trainer.extend(exception_raiser)
snapshot = extensions.snapshot_object(self.trainer, self.filename,
snapshot_on_error=True)
self.trainer.extend(snapshot)
self.assertFalse(os.path.exists(self.filename))
with self.assertRaises(TheOnlyError):
self.trainer.run()
self.assertTrue(os.path.exists(self.filename))
@testing.parameterize(*testing.product({'fmt':
['snapshot_iter_{}',
'snapshot_iter_{}.npz',
'{}_snapshot_man_suffix.npz']}))
class TestFindSnapshot(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.path)
def test_find_snapshot_files(self):
files = (self.fmt.format(i) for i in range(1, 100))
noise = ('dummy-foobar-iter{}'.format(i) for i in range(10, 304))
noise2 = ('tmpsnapshot_iter_{}'.format(i) for i in range(10, 304))
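        # ``_find_snapshot_files`` must ignore both the files whose names do
        # not match the format and the leftover ``tmp``-prefixed files.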
for file in itertools.chain(noise, files, noise2):
file = os.path.join(self.path, file)
open(file, 'w').close()
snapshot_files = _find_snapshot_files(self.fmt, self.path)
expected = sorted([self.fmt.format(i) for i in range(1, 100)])
assert len(snapshot_files) == 99
timestamps, snapshot_files = zip(*snapshot_files)
assert expected == sorted(list(snapshot_files))
def test_find_latest_snapshot(self):
files = [self.fmt.format(i) for i in range(1, 100)]
base_timestamp = time.time()
for i, file in enumerate(files):
file = os.path.join(self.path, file)
open(file, 'w').close()
            # The mtime resolution of some filesystems (e.g. ext3 or HFS+)
            # is one second, so snapshot files such as
            # ``snapshot_iter_9`` and ``snapshot_iter_99`` may end up with
            # the same timestamp when they are created within too short an
            # interval. Because the current autosnapshot does not use the
            # integer suffix to order snapshots, the timestamps are
            # modified explicitly here. The same applies to the other tests
            # on snapshot freshness in this file.
t = base_timestamp + i
os.utime(file, (t, t))
assert self.fmt.format(99) == _find_latest_snapshot(self.fmt,
self.path)
@testing.parameterize(*testing.product({'fmt':
['snapshot_iter_{}_{}',
'snapshot_iter_{}_{}.npz',
'{}_snapshot_man_{}-suffix.npz',
'snapshot_iter_{}.{}']}))
class TestFindSnapshot2(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp()
self.files = (self.fmt.format(i*10, j*10) for i, j
in itertools.product(range(0, 10), range(0, 10)))
def tearDown(self):
shutil.rmtree(self.path)
def test_find_snapshot_files(self):
noise = ('tmpsnapshot_iter_{}.{}'.format(i, j)
for i, j in zip(range(10, 304), range(10, 200)))
for file in itertools.chain(noise, self.files):
file = os.path.join(self.path, file)
open(file, 'w').close()
snapshot_files = _find_snapshot_files(self.fmt, self.path)
expected = [self.fmt.format(i*10, j*10)
for i, j in itertools.product(range(0, 10), range(0, 10))]
timestamps, snapshot_files = zip(*snapshot_files)
expected.sort()
snapshot_files = sorted(list(snapshot_files))
assert expected == snapshot_files
@testing.parameterize(*testing.product({'length_retain':
[(100, 30), (10, 30), (1, 1000),
(1000, 1), (1, 1), (1, 3), (2, 3)]}))
class TestFindStaleSnapshot(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.path)
def test_find_stale_snapshot(self):
length, retain = self.length_retain
fmt = 'snapshot_iter_{}'
files = [fmt.format(i) for i in range(0, length)]
base_timestamp = time.time() - length * 2
for i, file in enumerate(files):
file = os.path.join(self.path, file)
open(file, 'w').close()
            # The same comment as in ``TestFindSnapshot`` applies here.
t = base_timestamp + i
os.utime(file, (t, t))
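        # Out of ``length`` snapshots, the oldest ``max(length - retain, 0)``
        # ones are expected to be reported as stale.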
stale = list(_find_stale_snapshots(fmt, self.path, retain))
assert max(length-retain, 0) == len(stale)
expected = [fmt.format(i) for i in range(0, max(length-retain, 0))]
assert expected == stale
class TestRemoveStaleSnapshots(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.path)
def test_remove_stale_snapshots(self):
fmt = 'snapshot_iter_{.updater.iteration}'
retain = 3
snapshot = extensions.snapshot(filename=fmt, n_retains=retain,
autoload=False)
trainer = testing.get_trainer_with_mock_updater()
trainer.out = self.path
trainer.extend(snapshot, trigger=(1, 'iteration'), priority=2)
class TimeStampUpdater():
t = time.time() - 100
name = 'ts_updater'
            priority = 1  # This extension must run after the snapshot is taken
def __call__(self, _trainer):
filename = os.path.join(_trainer.out, fmt.format(_trainer))
self.t += 1
                # For filesystems that have low timestamp precision
os.utime(filename, (self.t, self.t))
trainer.extend(TimeStampUpdater(), trigger=(1, 'iteration'))
trainer.run()
assert 10 == trainer.updater.iteration
assert trainer._done
pattern = os.path.join(trainer.out, "snapshot_iter_*")
found = [os.path.basename(path) for path in glob.glob(pattern)]
assert retain == len(found)
found.sort()
# snapshot_iter_(8, 9, 10) expected
expected = ['snapshot_iter_{}'.format(i) for i in range(8, 11)]
expected.sort()
assert expected == found
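        # Resuming: with ``autoload=True`` the extension loads the latest
        # remaining snapshot when initialized on a fresh trainer.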
trainer2 = testing.get_trainer_with_mock_updater()
trainer2.out = self.path
assert not trainer2._done
snapshot2 = extensions.snapshot(filename=fmt, autoload=True)
# Just making sure no error occurs
snapshot2.initialize(trainer2)
testing.run_module(__name__, __file__)
| 9,566
| 34.172794
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_parameter_statistics.py
|
import re
import time
import unittest
import mock
import six
import chainer
from chainer import backend
from chainer import testing
from chainer import training
from chainer.training import extensions
def _get_mocked_trainer(links, stop_trigger=(10, 'iteration')):
updater = mock.Mock()
optimizer = mock.Mock()
target = mock.Mock()
target.namedlinks.return_value = [
(str(i), link) for i, link in enumerate(links)]
optimizer.target = target
updater.get_all_optimizers.return_value = {'optimizer_name': optimizer}
updater.iteration = 0
updater.epoch = 0
updater.epoch_detail = 0
updater.is_new_epoch = True
iter_per_epoch = 10
def update():
time.sleep(0.001)
updater.iteration += 1
updater.epoch = updater.iteration // iter_per_epoch
updater.epoch_detail = updater.iteration / iter_per_epoch
updater.is_new_epoch = updater.epoch == updater.epoch_detail
updater.update = update
return training.Trainer(updater, stop_trigger)
class TestParameterStatisticsBase(object):
def setUp(self):
self.trainer = _get_mocked_trainer(self.links)
def create_extension(self, skip_statistics=False):
kwargs = {
'statistics': self.statistics if not skip_statistics else None,
'report_params': self.report_params,
'report_grads': self.report_grads,
'prefix': self.prefix,
'skip_nan_params': True # avoid warnings when grads are nan
}
return extensions.ParameterStatistics(self.links, **kwargs)
@testing.parameterize(
{
'links': [chainer.links.Linear(3, 2)],
'statistics': {'min': lambda x: backend.get_array_module(x).min(x)},
'report_params': True,
'report_grads': True,
'prefix': None,
'expect': 4
},
{
'links': [chainer.links.Linear(3, 2)],
'statistics': {'min': lambda x: backend.get_array_module(x).min(x)},
'report_params': False,
'report_grads': True,
'prefix': 'test',
'expect': 2
},
{
'links': [chainer.links.Linear(3, 2)],
'statistics': {'min': lambda x: backend.get_array_module(x).min(x)},
'report_params': True,
'report_grads': False,
'prefix': None,
'expect': 2
},
{
'links': [chainer.links.Linear(3, 2)],
'statistics': {'min': lambda x: backend.get_array_module(x).min(x)},
'report_params': False,
'report_grads': False,
'prefix': 'test',
'expect': 0
},
{
'links': [chainer.links.Linear(3, 2)],
'statistics': extensions.ParameterStatistics.default_statistics,
'report_params': True,
'report_grads': True,
'prefix': None,
'expect': 36
},
{
'links': [chainer.links.Linear(3, 2)],
'statistics': extensions.ParameterStatistics.default_statistics,
'report_params': True,
'report_grads': False,
'prefix': 'test',
'expect': 24
},
{
'links': [chainer.links.Linear(3, 2)],
'statistics': extensions.ParameterStatistics.default_statistics,
'report_params': False,
'report_grads': True,
'prefix': None,
'expect': 12
},
{
'links': [chainer.links.Linear(3, 2)],
'statistics': extensions.ParameterStatistics.default_statistics,
'report_params': False,
'report_grads': False,
'prefix': 'test',
'expect': 0
}
)
class TestParameterStatistics(TestParameterStatisticsBase, unittest.TestCase):
def test_report(self):
self.trainer.extend(self.create_extension())
self.trainer.run()
self.assertEqual(len(self.trainer.observation), self.expect)
def test_report_late_register(self):
extension = self.create_extension(skip_statistics=True)
for name, function in six.iteritems(self.statistics):
extension.register_statistics(name, function)
self.trainer.extend(extension)
self.trainer.run()
self.assertEqual(len(self.trainer.observation), self.expect)
def test_report_key_pattern(self):
self.trainer.extend(self.create_extension())
self.trainer.run()
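        # Report keys have the form
        # '<link path>/<param name>/(data|grad)/<statistic>', optionally
        # preceded by the user-given prefix.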
pattern = r'^(.+/){2,}(data|grad)/.+[^/]$'
for name in six.iterkeys(self.trainer.observation):
if self.prefix is not None:
assert name.startswith(self.prefix)
match = re.match(pattern, name)
assert match is not None
if self.report_params and self.report_grads:
pass
elif self.report_params:
assert 'data' == match.group(2)
elif self.report_grads:
assert 'grad' == match.group(2)
@testing.parameterize(
{
'links': [chainer.links.Linear(3, 2)],
'statistics': {'one': lambda x: 1.0},
'report_params': True,
'report_grads': True,
'prefix': 'test',
'expect': 1.0
}
)
class TestParameterStatisticsCustomFunction(TestParameterStatisticsBase,
unittest.TestCase):
def test_custom_function(self):
extension = extensions.ParameterStatistics(
self.links, statistics=self.statistics)
self.trainer.extend(extension)
self.trainer.run()
for value in six.itervalues(self.trainer.observation):
self.assertEqual(value, self.expect)
testing.run_module(__name__, __file__)
| 5,565
| 29.415301
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_polynomial_shift.py
|
import unittest
import mock
from chainer import testing
from chainer.training import extensions
from chainer.training import util
@testing.parameterize(
{'init': 2.0, 'rate': 0.5, 'max_count': 10, 'target': None,
'expect': [2.0, 1.8973665961010275, 1.7888543819998317]},
{'init': 2.0, 'rate': 0.5, 'max_count': 5, 'target': 1.2,
'expect': [2.0, 1.7888543819998317, 1.5491933384829668]},
{'init': -2.0, 'rate': 0.5, 'max_count': 10, 'target': None,
'expect': [-2.0, -1.8973665961010275, -1.7888543819998317]},
{'init': 2.0, 'rate': 0.5, 'max_count': 10, 'target': 1.8,
'expect': [2.0, 1.8973665961010275, 1.8]},
{'init': 2.0, 'rate': -0.5, 'max_count': 10, 'target': 2.2,
'expect': [2.0, 2.1081851067789197, 2.2]},
{'init': 2.0, 'rate': -0.5, 'max_count': 10, 'target': None,
'expect': [2.0, 2.1081851067789197, 2.23606797749979]},
{'init': 0, 'rate': -0.5, 'max_count': 10, 'target': None,
'expect': [0.0, 0.0, 0.0]},
)
class TestPolynomialShift(unittest.TestCase):
def setUp(self):
self.optimizer = mock.MagicMock()
self.extension = extensions.PolynomialShift(
'x', self.rate, self.max_count, self.init, self.target,
self.optimizer)
self.interval = 4
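        # The extension fires every ``interval`` iterations, so each expected
        # value is held for ``interval`` consecutive iterations.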
self.expect = [e for e in self.expect for _ in range(self.interval)]
self.trigger = util.get_trigger((self.interval, 'iteration'))
self.trainer = testing.get_trainer_with_mock_updater(self.trigger)
self.trainer.updater.get_optimizer.return_value = self.optimizer
def _run_trainer(self, extension, expect, optimizer=None):
if optimizer is None:
optimizer = self.optimizer
extension.initialize(self.trainer)
actual = []
for _ in expect:
self.trainer.updater.update()
actual.append(optimizer.x)
if self.trigger(self.trainer):
extension(self.trainer)
self.assertEqual(actual, expect)
def test_basic(self):
self.optimizer.x = 0
extension = extensions.PolynomialShift(
'x', self.rate, self.max_count, init=self.init, target=self.target)
self._run_trainer(extension, self.expect)
def test_without_init(self):
self.optimizer.x = self.init
extension = extensions.PolynomialShift(
'x', self.rate, self.max_count, target=self.target)
self._run_trainer(extension, self.expect)
def test_with_optimizer(self):
optimizer = mock.Mock()
optimizer.x = 0
extension = extensions.PolynomialShift(
'x', self.rate, self.max_count, init=self.init, target=self.target,
optimizer=optimizer)
self._run_trainer(extension, self.expect, optimizer)
def test_resume(self):
new_optimizer = mock.Mock()
new_extension = extensions.PolynomialShift(
'x', self.rate, self.max_count, self.init, self.target,
new_optimizer)
self.trainer.extend(self.extension)
self.trainer.run()
new_trainer = testing.get_trainer_with_mock_updater((3, 'iteration'))
new_trainer.extend(new_extension)
testing.save_and_load_npz(self.trainer, new_trainer)
new_extension.initialize(new_trainer)
self.assertEqual(new_optimizer.x, self.optimizer.x)
self.assertIsInstance(new_optimizer.x, float)
testing.run_module(__name__, __file__)
| 3,445
| 35.659574
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_computational_graph.py
|
import os
import shutil
import tempfile
import unittest
import numpy
import chainer
from chainer import configuration
from chainer import links
from chainer import testing
from chainer import training
from chainer.training.extensions import computational_graph as c
class Function1(chainer.FunctionNode):
def forward(self, inputs):
return inputs[0],
class Function2(chainer.FunctionNode):
def forward(self, inputs):
return inputs[0],
class Dataset(chainer.dataset.DatasetMixin):
def __init__(self, values):
self.values = values
def __len__(self):
return len(self.values)
def get_example(self, i):
return numpy.array([self.values[i]], numpy.float32), numpy.int32(i % 2)
class Model(chainer.Link):
def __init__(self):
super(Model, self).__init__()
self.flag_history = []
self.l1 = links.Linear(2)
self.l2 = links.Linear(2)
self.i = 0
def forward(self, x):
self.flag_history.append(
configuration.config.keep_graph_on_report)
h = self.l1(x)
if self.i == 0:
h, = Function1().apply((h,))
else:
h, = Function2().apply((h,))
h = self.l2(h)
self.i += 1
return h
class TestGraphBuilderKeepGraphOnReport(unittest.TestCase):
def _run_test(self, tempdir, initial_flag):
n_data = 4
n_epochs = 3
outdir = os.path.join(tempdir, 'testresult')
# Prepare
model = Model()
classifier = links.Classifier(model)
optimizer = chainer.optimizers.Adam()
optimizer.setup(classifier)
dataset = Dataset([i for i in range(n_data)])
iterator = chainer.iterators.SerialIterator(dataset, 1, shuffle=False)
updater = training.updaters.StandardUpdater(iterator, optimizer)
trainer = training.Trainer(updater, (n_epochs, 'epoch'), out=outdir)
extension = c.DumpGraph('main/loss', filename='test.dot')
trainer.extend(extension)
# Run
with chainer.using_config('keep_graph_on_report', initial_flag):
trainer.run()
# Check flag history
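        # ``DumpGraph`` turns ``keep_graph_on_report`` on only for the first
        # iteration so that the graph can be recorded; the user's original
        # setting is in effect for all later iterations.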
self.assertEqual(model.flag_history,
[True] + [initial_flag] * (n_data * n_epochs - 1))
# Check the dumped graph
graph_path = os.path.join(outdir, 'test.dot')
with open(graph_path) as f:
graph_dot = f.read()
# Check that only the first iteration is dumped
self.assertIn('Function1', graph_dot)
self.assertNotIn('Function2', graph_dot)
if c.is_graphviz_available():
self.assertTrue(os.path.exists(os.path.join(outdir, 'test.png')))
def _check(self, initial_flag):
tempdir = tempfile.mkdtemp()
try:
self._run_test(tempdir, initial_flag)
finally:
shutil.rmtree(tempdir)
def test_keep_graph_on_report_flag_true(self):
self._check(True)
def test_keep_graph_on_report_flag_false(self):
self._check(False)
testing.run_module(__name__, __file__)
| 3,109
| 25.355932
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_step_shift.py
|
import unittest
import mock
from chainer import testing
from chainer import training
from chainer.training import extensions
@testing.parameterize(
{'init': 2.0, 'gamma': 0.5, 'step': 2, 'target': None,
'expect': [2.0, 2.0, 1.0, 1.0, 0.5, 0.5]},
{'init': 2.0, 'gamma': 0.5, 'step': 2, 'target': 1.2,
'expect': [2.0, 2.0, 1.2, 1.2, 1.2, 1.2]},
{'init': -2.0, 'gamma': 0.5, 'step': 2, 'target': -1.2,
'expect': [-2.0, -2.0, -1.2, -1.2, -1.2, -1.2]},
{'init': 2.0, 'gamma': 2.0, 'step': 2, 'target': None,
'expect': [2.0, 2.0, 4.0, 4.0, 8.0, 8.0]},
{'init': 2.0, 'gamma': 2.0, 'step': 2, 'target': 3.0,
'expect': [2.0, 2.0, 3.0, 3.0, 3.0, 3.0]},
{'init': -2.0, 'gamma': 2.0, 'step': 2, 'target': -3.0,
'expect': [-2.0, -2.0, -3.0, -3.0, -3.0, -3.0]},
)
class TestStepShift(unittest.TestCase):
def setUp(self):
self.optimizer = mock.MagicMock()
self.extension = extensions.StepShift(
'x', self.gamma, self.step, self.init, self.target, self.optimizer)
self.interval = 1
self.expect = [e for e in self.expect for _ in range(self.interval)]
self.trigger = training.get_trigger((self.interval, 'iteration'))
self.trainer = testing.get_trainer_with_mock_updater(self.trigger)
self.trainer.updater.get_optimizer.return_value = self.optimizer
def _run_trainer(self, extension, expect, optimizer=None):
if optimizer is None:
optimizer = self.optimizer
extension.initialize(self.trainer)
actual = []
for _ in expect:
self.trainer.updater.update()
actual.append(optimizer.x)
if self.trigger(self.trainer):
extension(self.trainer)
self.assertEqual(actual, expect)
def test_basic(self):
self.optimizer.x = 0
extension = extensions.StepShift(
'x', self.gamma, self.step, init=self.init, target=self.target)
self._run_trainer(extension, self.expect)
def test_without_init(self):
self.optimizer.x = self.init
extension = extensions.StepShift(
'x', self.gamma, self.step, init=self.init, target=self.target)
self._run_trainer(extension, self.expect)
def test_with_optimizer(self):
optimizer = mock.Mock()
optimizer.x = 0
extension = extensions.StepShift(
'x', self.gamma, self.step, self.init, self.target, optimizer)
self._run_trainer(extension, self.expect, optimizer)
def test_resume(self):
new_optimizer = mock.Mock()
new_extension = extensions.StepShift(
'x', self.gamma, self.step, self.init, self.target, new_optimizer)
self.trainer.extend(self.extension)
self.trainer.run()
new_trainer = testing.get_trainer_with_mock_updater((5, 'iteration'))
new_trainer.extend(new_extension)
testing.save_and_load_npz(self.trainer, new_trainer)
new_extension.initialize(new_trainer)
self.assertEqual(new_optimizer.x, self.optimizer.x)
self.assertIsInstance(new_optimizer.x, float)
testing.run_module(__name__, __file__)
| 3,172
| 34.651685
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_exponential_shift.py
|
import unittest
import mock
from chainer import testing
from chainer.training import extensions
from chainer.training import util
@testing.parameterize(
{'init': 2.0, 'rate': 0.5, 'target': None, 'expect': [2.0, 1.0, 0.5]},
{'init': 2.0, 'rate': 0.5, 'target': 1.2, 'expect': [2.0, 1.2, 1.2]},
{'init': -2.0, 'rate': 0.5, 'target': -1.2, 'expect': [-2.0, -1.2, -1.2]},
{'init': 2.0, 'rate': 2.0, 'target': None, 'expect': [2.0, 4.0, 8.0]},
{'init': 2.0, 'rate': 2.0, 'target': 3.0, 'expect': [2.0, 3.0, 3.0]},
{'init': -2.0, 'rate': 2.0, 'target': -3.0, 'expect': [-2.0, -3.0, -3.0]},
)
class TestExponentialShift(unittest.TestCase):
def setUp(self):
self.optimizer = mock.MagicMock()
self.extension = extensions.ExponentialShift(
'x', self.rate, self.init, self.target, self.optimizer)
self.interval = 4
self.expect = [e for e in self.expect for _ in range(self.interval)]
self.trigger = util.get_trigger((self.interval, 'iteration'))
self.trainer = testing.get_trainer_with_mock_updater(self.trigger)
self.trainer.updater.get_optimizer.return_value = self.optimizer
def _run_trainer(self, extension, expect, optimizer=None):
if optimizer is None:
optimizer = self.optimizer
extension.initialize(self.trainer)
actual = []
for _ in expect:
self.trainer.updater.update()
actual.append(optimizer.x)
if self.trigger(self.trainer):
extension(self.trainer)
self.assertEqual(actual, expect)
def test_basic(self):
self.optimizer.x = 0
extension = extensions.ExponentialShift(
'x', self.rate, init=self.init, target=self.target)
self._run_trainer(extension, self.expect)
def test_without_init(self):
self.optimizer.x = self.init
extension = extensions.ExponentialShift(
'x', self.rate, target=self.target)
self._run_trainer(extension, self.expect)
def test_with_optimizer(self):
optimizer = mock.Mock()
optimizer.x = 0
extension = extensions.ExponentialShift(
'x', self.rate, init=self.init, target=self.target,
optimizer=optimizer)
self._run_trainer(extension, self.expect, optimizer)
def test_resume(self):
new_optimizer = mock.Mock()
new_extension = extensions.ExponentialShift(
'x', self.rate, self.init, self.target, new_optimizer)
self.trainer.extend(self.extension)
self.trainer.run()
new_trainer = testing.get_trainer_with_mock_updater((3, 'iteration'))
new_trainer.extend(new_extension)
testing.save_and_load_npz(self.trainer, new_trainer)
new_extension.initialize(new_trainer)
self.assertEqual(new_optimizer.x, self.optimizer.x)
self.assertIsInstance(new_optimizer.x, float)
class TestExponentialShiftInvalidArgument(unittest.TestCase):
def test_negative_rate(self):
with self.assertRaises(ValueError):
extensions.ExponentialShift('x', -1.0)
testing.run_module(__name__, __file__)
| 3,169
| 33.835165
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/extensions_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_warmup_shift.py
|
import unittest
import mock
from chainer import testing
from chainer.training import extensions
from chainer.training import util
@testing.parameterize(
{'init': 1, 'warmup_start': 0.1,
'warmup_iter': 100, 'expect': [0.1, 0.991, 1, 1]},
{'init': 0.1, 'warmup_start': 1,
'warmup_iter': 10, 'expect': [1, 0.19, 0.1, 0.1]},
{'init': 1, 'warmup_start': -1,
'warmup_iter': 10, 'expect': [-1, 0.8, 1, 1]},
{'init': 1, 'warmup_start': -1,
'warmup_iter': 2, 'expect': [-1, 0, 1, 1]},
{'init': 0.1, 'warmup_start': 1,
'warmup_iter': 2, 'expect': [1, 0.55, 0.1, 0.1]},
)
class TestWarmupShift(unittest.TestCase):
def setUp(self):
self.optimizer = mock.MagicMock()
self.extension = extensions.WarmupShift(
'x', self.warmup_start, self.warmup_iter,
self.init, self.optimizer)
self.interval = 1
self.expect = [e for e in self.expect for _ in range(self.interval)]
self.trigger = util.get_trigger((self.interval, 'iteration'))
self.trainer = testing.get_trainer_with_mock_updater(self.trigger)
self.trainer.updater.get_optimizer.return_value = self.optimizer
def _run_trainer(self, extension, expect, optimizer=None):
if optimizer is None:
optimizer = self.optimizer
extension.initialize(self.trainer)
actual = []
for _ in range(self.warmup_iter + 2):
self.trainer.updater.update()
actual.append(optimizer.x)
if self.trigger(self.trainer):
extension(self.trainer)
testing.assert_allclose(actual[0], expect[0])
testing.assert_allclose(actual[self.warmup_iter-1], expect[1])
testing.assert_allclose(actual[self.warmup_iter], expect[2])
testing.assert_allclose(actual[self.warmup_iter+1], expect[3])
def test_basic(self):
self.optimizer.x = 0
extension = extensions.WarmupShift(
'x', self.warmup_start, self.warmup_iter,
self.init, self.optimizer)
self._run_trainer(extension, self.expect)
def test_without_init(self):
self.optimizer.x = self.warmup_start
extension = extensions.WarmupShift(
'x', self.warmup_start, self.warmup_iter,
self.init, self.optimizer)
self._run_trainer(extension, self.expect)
def test_with_optimizer(self):
optimizer = mock.Mock()
optimizer.x = 0
extension = extensions.WarmupShift(
'x', self.warmup_start, self.warmup_iter,
self.init, optimizer)
self._run_trainer(extension, self.expect, optimizer)
testing.run_module(__name__, __file__)
| 2,688
| 33.922078
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_inverse_shift.py
|
import unittest
import mock
from chainer import testing
from chainer.training import extensions
from chainer.training import util
@testing.parameterize(
{'init': 3.0, 'gamma': 1.0, 'power': 1.0, 'target': None,
'expect': [3.0, 1.5, 1.0]},
{'init': 3.0, 'gamma': 1.0, 'power': 1.0, 'target': 1.8,
'expect': [3.0, 1.8, 1.8]},
{'init': -3.0, 'gamma': 1.0, 'power': 1.0, 'target': -1.8,
'expect': [-3.0, -1.8, -1.8]},
{'init': 3.0, 'gamma': 1.0, 'power': -2.0, 'target': None,
'expect': [3.0, 12.0, 27.0]},
{'init': 3.0, 'gamma': 1.0, 'power': -2.0, 'target': 4.0,
'expect': [3.0, 4.0, 4.0]},
{'init': -3.0, 'gamma': 1.0, 'power': -2.0, 'target': -4.0,
'expect': [-3.0, -4.0, -4.0]},
)
class TestInverseShift(unittest.TestCase):
def setUp(self):
self.optimizer = mock.MagicMock()
self.extension = extensions.InverseShift(
'x', self.gamma, self.power, self.init, self.target,
self.optimizer)
self.interval = 4
self.expect = [e for e in self.expect for _ in range(self.interval)]
self.trigger = util.get_trigger((self.interval, 'iteration'))
self.trainer = testing.get_trainer_with_mock_updater(self.trigger)
self.trainer.updater.get_optimizer.return_value = self.optimizer
def _run_trainer(self, extension, expect, optimizer=None):
if optimizer is None:
optimizer = self.optimizer
extension.initialize(self.trainer)
actual = []
for _ in expect:
self.trainer.updater.update()
actual.append(optimizer.x)
if self.trigger(self.trainer):
extension(self.trainer)
self.assertEqual(actual, expect)
def test_basic(self):
self.optimizer.x = 0
extension = extensions.InverseShift(
'x', self.gamma, self.power, init=self.init, target=self.target)
self._run_trainer(extension, self.expect)
def test_without_init(self):
self.optimizer.x = self.init
extension = extensions.InverseShift(
'x', self.gamma, self.power, target=self.target)
self._run_trainer(extension, self.expect)
def test_with_optimizer(self):
optimizer = mock.Mock()
optimizer.x = 0
extension = extensions.InverseShift(
'x', self.gamma, self.power, init=self.init, target=self.target,
optimizer=optimizer)
self._run_trainer(extension, self.expect, optimizer)
def test_resume(self):
new_optimizer = mock.Mock()
new_extension = extensions.InverseShift(
'x', self.gamma, self.power, self.init, self.target, new_optimizer)
self.trainer.extend(self.extension)
self.trainer.run()
new_trainer = testing.get_trainer_with_mock_updater((3, 'iteration'))
new_trainer.extend(new_extension)
testing.save_and_load_npz(self.trainer, new_trainer)
new_extension.initialize(new_trainer)
self.assertEqual(new_optimizer.x, self.optimizer.x)
self.assertIsInstance(new_optimizer.x, float)
class TestInverseShiftInvalidArgument(unittest.TestCase):
def test_negative_rate(self):
with self.assertRaises(ValueError):
extensions.InverseShift('x', -1.0, 1.0)
testing.run_module(__name__, __file__)
| 3,344
| 33.132653
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_multistep_shift.py
|
import unittest
import mock
from chainer import testing
from chainer.training import extensions
from chainer.training import util
@testing.parameterize(
{'init': 2.0, 'gamma': 0.1, 'step_value': [1, 3, 5],
'expect': [2.0, 0.2, 0.2, 0.02, 0.02, 0.002]},
{'init': -2.0, 'gamma': 0.1, 'step_value': [1, 3, 5],
'expect': [-2.0, -0.2, -0.2, -0.02, -0.02, -0.002]},
{'init': 2.0, 'gamma': 2, 'step_value': [1, 3, 5],
'expect': [2.0, 4.0, 4.0, 8.0, 8.0, 16.0]},
{'init': -2.0, 'gamma': 2, 'step_value': [1, 3, 5],
'expect': [-2.0, -4.0, -4.0, -8.0, -8.0, -16.0]},
)
class TestMultistepShift(unittest.TestCase):
def setUp(self):
self.optimizer = mock.MagicMock()
self.extension = extensions.MultistepShift(
'x', self.gamma, self.step_value, self.init, self.optimizer)
self.interval = 1
self.expect = [e for e in self.expect for _ in range(self.interval)]
self.trigger = util.get_trigger((self.interval, 'iteration'))
self.trainer = testing.get_trainer_with_mock_updater(self.trigger)
self.trainer.updater.get_optimizer.return_value = self.optimizer
def _run_trainer(self, extension, expect, optimizer=None):
if optimizer is None:
optimizer = self.optimizer
extension.initialize(self.trainer)
actual = []
for _ in expect:
self.trainer.updater.update()
actual.append(optimizer.x)
if self.trigger(self.trainer):
extension(self.trainer)
testing.assert_allclose(actual[0], expect[0])
testing.assert_allclose(actual[1], expect[1])
testing.assert_allclose(actual[2], expect[2])
testing.assert_allclose(actual[3], expect[3])
testing.assert_allclose(actual[4], expect[4])
testing.assert_allclose(actual[5], expect[5])
def test_basic(self):
self.optimizer.x = 0
extension = extensions.MultistepShift(
'x', self.gamma, self.step_value, self.init, self.optimizer)
self._run_trainer(extension, self.expect)
def test_without_init(self):
self.optimizer.x = self.init
extension = extensions.MultistepShift(
'x', self.gamma, self.step_value, self.init, self.optimizer)
self._run_trainer(extension, self.expect)
def test_with_optimizer(self):
optimizer = mock.Mock()
optimizer.x = 0
extension = extensions.MultistepShift(
'x', self.gamma, self.step_value, self.init, optimizer)
self._run_trainer(extension, self.expect, optimizer)
testing.run_module(__name__, __file__)
| 2,641
| 35.191781
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_variable_statistics_plot.py
|
import os
import unittest
import numpy
import six
import chainer
from chainer import testing
from chainer.training import extensions
try:
import matplotlib
_available = True
except ImportError:
_available = False
class TestVariableStatisticsPlot(unittest.TestCase):
def setUp(self):
stop_trigger = (2, 'iteration')
extension_trigger = (1, 'iteration')
self.filename = 'variable_statistics_plot_test.png'
self.trainer = testing.get_trainer_with_mock_updater(
stop_trigger=stop_trigger)
x = numpy.random.rand(1, 2, 3)
self.extension = extensions.VariableStatisticsPlot(
chainer.variable.Variable(x), trigger=extension_trigger,
filename=self.filename)
self.trainer.extend(self.extension, extension_trigger)
    # In the following we explicitly check the module-level ``_available``
    # flag instead of calling ``PlotReport.available()``, because
    # ``test_available()`` sometimes fails: it does not always raise
    # ``UserWarning`` even though matplotlib is not installed (this is due
    # to a difference in unittest behavior between Python 2 and Python 3).
@unittest.skipUnless(_available, 'matplotlib is not installed')
def test_run_and_save_plot(self):
matplotlib.use('Agg')
try:
self.trainer.run()
finally:
os.remove(os.path.join(self.trainer.out, self.filename))
@testing.parameterize(
{'shape': (2, 7, 3), 'n': 5, 'reservoir_size': 3}
)
class TestReservoir(unittest.TestCase):
def setUp(self):
self.xs = [
numpy.random.uniform(-1, 1, self.shape) for i in range(self.n)]
def test_reservoir_size(self):
self.reservoir = extensions.variable_statistics_plot.Reservoir(
size=self.reservoir_size, data_shape=self.shape)
for x in self.xs:
self.reservoir.add(x)
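        # The reservoir keeps at most ``size`` of the added samples and
        # returns their indices in increasing order.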
idxs, data = self.reservoir.get_data()
assert len(idxs) == self.reservoir_size
assert len(data) == self.reservoir_size
assert idxs.ndim == 1
assert data[0].shape == self.xs[0].shape
testing.assert_allclose(idxs, numpy.sort(idxs))
@testing.parameterize(
{'shape': (2, 7, 3)}
)
class TestStatistician(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape)
def test_statistician_percentile(self):
self.percentile_sigmas = (0., 50., 100.) # min, median, max
self.statistician = extensions.variable_statistics_plot.Statistician(
collect_mean=True, collect_std=True,
percentile_sigmas=self.percentile_sigmas)
stat = self.statistician(self.x, axis=None, dtype=self.x.dtype)
for s in six.itervalues(stat):
assert s.dtype == self.x.dtype
testing.assert_allclose(stat['mean'], numpy.mean(self.x))
testing.assert_allclose(stat['std'], numpy.std(self.x))
percentile = stat['percentile']
assert len(percentile) == 3
testing.assert_allclose(percentile[0], numpy.min(self.x))
testing.assert_allclose(percentile[1], numpy.median(self.x))
testing.assert_allclose(percentile[2], numpy.max(self.x))
testing.run_module(__name__, __file__)
| 3,260
| 30.970588
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/extensions_tests/test_snapshot_writers.py
|
import multiprocessing
import threading
import unittest
import mock
from chainer import testing
from chainer.training.extensions import snapshot_writers
from chainer import utils
snapshot_writers_path = 'chainer.training.extensions.snapshot_writers'
class TestSimpleWriter(unittest.TestCase):
def test_call(self):
target = mock.MagicMock()
w = snapshot_writers.SimpleWriter()
w.save = mock.MagicMock()
with utils.tempdir() as tempd:
w('myfile.dat', tempd, target)
assert w.save.call_count == 1
class TestStandardWriter(unittest.TestCase):
def test_call(self):
target = mock.MagicMock()
w = snapshot_writers.StandardWriter()
worker = mock.MagicMock()
name = snapshot_writers_path + '.StandardWriter.create_worker'
with mock.patch(name, return_value=worker):
with utils.tempdir() as tempd:
w('myfile.dat', tempd, target)
w('myfile.dat', tempd, target)
w.finalize()
assert worker.start.call_count == 2
assert worker.join.call_count == 2
class TestThreadWriter(unittest.TestCase):
def test_create_worker(self):
target = mock.MagicMock()
w = snapshot_writers.ThreadWriter()
with utils.tempdir() as tempd:
worker = w.create_worker('myfile.dat', tempd, target)
assert isinstance(worker, threading.Thread)
class TestProcessWriter(unittest.TestCase):
def test_create_worker(self):
target = mock.MagicMock()
w = snapshot_writers.ProcessWriter()
with utils.tempdir() as tempd:
worker = w.create_worker('myfile.dat', tempd, target)
assert isinstance(worker, multiprocessing.Process)
class TestQueueWriter(unittest.TestCase):
def test_call(self):
target = mock.MagicMock()
q = mock.MagicMock()
consumer = mock.MagicMock()
names = [snapshot_writers_path + '.QueueWriter.create_queue',
snapshot_writers_path + '.QueueWriter.create_consumer']
with mock.patch(names[0], return_value=q):
with mock.patch(names[1], return_value=consumer):
w = snapshot_writers.QueueWriter()
with utils.tempdir() as tempd:
w('myfile.dat', tempd, target)
w('myfile.dat', tempd, target)
w.finalize()
assert consumer.start.call_count == 1
assert q.put.call_count == 3
                assert q.join.call_count == 1
assert consumer.join.call_count == 1
def test_consume(self):
names = [snapshot_writers_path + '.QueueWriter.create_queue',
snapshot_writers_path + '.QueueWriter.create_consumer']
with mock.patch(names[0]):
with mock.patch(names[1]):
task = mock.MagicMock()
q = mock.MagicMock()
q.get = mock.MagicMock(side_effect=[task, task, None])
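                # ``consume`` keeps reading from the queue until it receives
                # ``None``; both tasks are executed and ``task_done`` is
                # called for every fetched item, including the final ``None``.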
w = snapshot_writers.QueueWriter()
w.consume(q)
assert q.get.call_count == 3
assert task[0].call_count == 2
assert q.task_done.call_count == 3
testing.run_module(__name__, __file__)
| 3,292
| 30.970874
| 72
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/updaters_tests/test_multiprocess_parallel_updater.py
|
import copy
import os
import subprocess
import sys
import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions.math.minmax
from chainer import initializers
import chainer.reporter
from chainer import testing
from chainer.testing import attr
import chainer.training.updaters.multiprocess_parallel_updater as mpu
class SimpleNet(chainer.Chain):
insize = 5
def __init__(self, dtype=numpy.float32):
super(SimpleNet, self).__init__()
self.dtype = dtype
W = initializers.HeNormal(1 / numpy.sqrt(2), self.dtype)
bias = initializers.Zero(self.dtype)
with self.init_scope():
self.conv = chainer.links.Convolution2D(2, 2, 3, initialW=W,
initial_bias=bias)
self.fc = chainer.links.Linear(18, 2, initialW=W,
initial_bias=bias)
self.train = True
def clear(self):
self.loss = None
self.accuracy = None
def __call__(self, x, t):
h = chainer.functions.relu(self.conv(x))
y = self.fc(h)
self.loss = chainer.functions.softmax_cross_entropy(y, t)
self.accuracy = chainer.functions.accuracy(y, t)
return self.loss
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float16],
}))
class TestGatherScatter(unittest.TestCase):
@attr.gpu
def test_gather_scatter_grads(self):
cupy = cuda.cupy
model0 = SimpleNet(dtype=self.dtype)
model1 = copy.deepcopy(model0)
with testing.assert_warns(DeprecationWarning):
model0.to_gpu()
with testing.assert_warns(DeprecationWarning):
model1.to_gpu()
optimizer0 = chainer.optimizers.SGD(lr=1.0)
optimizer0.setup(model0)
optimizer1 = chainer.optimizers.SGD(lr=1.0)
optimizer1.setup(model1)
bsize = 8
x = numpy.random.uniform(0, 1, (bsize, 2, 5, 5)).astype(self.dtype)
t = numpy.empty(bsize, dtype=numpy.int32)
for i in range(bsize):
t[i] = i % 2
x = chainer.Variable(chainer.backends.cuda.to_gpu(x))
t = chainer.Variable(chainer.backends.cuda.to_gpu(t))
loss0 = model0(x, t)
model0.cleargrads()
model1.cleargrads()
loss0.backward()
gg0 = mpu.gather_grads(model0)
mpu.scatter_grads(model1, gg0)
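        # After scattering the gradients gathered from ``model0`` into
        # ``model1``, both models must hold identical gradients and, after an
        # update step, identical parameters.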
cupy.testing.assert_array_equal(model0.conv.W.grad, model1.conv.W.grad)
cupy.testing.assert_array_equal(model0.conv.b.grad, model1.conv.b.grad)
cupy.testing.assert_array_equal(model0.fc.W.grad, model1.fc.W.grad)
cupy.testing.assert_array_equal(model0.fc.b.grad, model1.fc.b.grad)
optimizer0.update()
optimizer1.update()
cupy.testing.assert_array_equal(model0.conv.W.data, model1.conv.W.data)
cupy.testing.assert_array_equal(model0.conv.b.data, model1.conv.b.data)
cupy.testing.assert_array_equal(model0.fc.W.data, model1.fc.W.data)
cupy.testing.assert_array_equal(model0.fc.b.data, model1.fc.b.data)
def test_gather_grads_raise_on_cpu(self):
model = SimpleNet(dtype=self.dtype)
with self.assertRaises(RuntimeError):
mpu.gather_grads(model)
@attr.gpu
def test_gather_scatter_params(self):
cupy = cuda.cupy
model0 = SimpleNet(dtype=self.dtype)
model1 = SimpleNet(dtype=self.dtype)
with testing.assert_warns(DeprecationWarning):
model0.to_gpu()
with testing.assert_warns(DeprecationWarning):
model1.to_gpu()
gp0 = mpu.gather_params(model0)
mpu.scatter_params(model1, gp0)
cupy.testing.assert_array_equal(model0.conv.W.data, model1.conv.W.data)
cupy.testing.assert_array_equal(model0.conv.b.data, model1.conv.b.data)
cupy.testing.assert_array_equal(model0.fc.W.data, model1.fc.W.data)
cupy.testing.assert_array_equal(model0.fc.b.data, model1.fc.b.data)
def test_gather_params_raise_on_cpu(self):
model = SimpleNet(dtype=self.dtype)
with self.assertRaises(RuntimeError):
mpu.gather_params(model)
def _run_test_snippet(name, *args):
script_path = os.path.join(
os.path.dirname(__file__), 'snippets/{}'.format(name))
proc = subprocess.Popen(
(sys.executable, script_path) + args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdoutdata, stderrdata = proc.communicate()
ret = proc.returncode
return (ret, stdoutdata, stderrdata)
class TestRawArray(unittest.TestCase):
@attr.gpu
@unittest.skipUnless(mpu.MultiprocessParallelUpdater.available(),
'MultiprocessParallelUpdater is not available.')
def test_update_uses_raw_array(self):
ret, stdoutdata, stderrdata = _run_test_snippet(
'raw_array.py', '@cupy:0')
assert ret == 0, (
'[stdout]:{!r}\n'
'[stderr]:{!r}'.format(stdoutdata, stderrdata))
class TestChildReporter(unittest.TestCase):
def check_with_devices(self, n_devices):
devices_str = ','.join([
'@cupy:{}'.format(device_id) for device_id in range(n_devices)])
ret, stdoutdata, stderrdata = _run_test_snippet(
'child_reporter.py', devices_str)
assert ret == 0, (
'[stdout]:{!r}\n'
'[stderr]:{!r}'.format(stdoutdata, stderrdata))
@attr.gpu
@unittest.skipUnless(mpu.MultiprocessParallelUpdater.available(),
'MultiprocessParallelUpdater is not available.')
def test_single_device(self):
self.check_with_devices(1)
@attr.multi_gpu(2)
@unittest.skipUnless(mpu.MultiprocessParallelUpdater.available(),
'MultiprocessParallelUpdater is not available.')
def test_multi_device(self):
self.check_with_devices(2)
class TestCUDAContext(unittest.TestCase):
@attr.gpu
@unittest.skipUnless(mpu.MultiprocessParallelUpdater.available(),
'MultiprocessParallelUpdater is not available.')
def test_cuda_init_fork(self):
ret, stdoutdata, stderrdata = _run_test_snippet(
'cuda_init.py', '@cupy:0', 'fork')
assert ret == 0, (
'[stdout]:{!r}\n'
'[stderr]:{!r}'.format(stdoutdata, stderrdata))
@attr.gpu
@unittest.skipUnless(mpu.MultiprocessParallelUpdater.available(),
'MultiprocessParallelUpdater is not available.')
def test_cuda_init_spawn(self):
ret, stdoutdata, stderrdata = _run_test_snippet(
'cuda_init.py', '@cupy:0', 'spawn')
assert ret == 0, (
'[stdout]:{!r}\n'
'[stderr]:{!r}'.format(stdoutdata, stderrdata))
@attr.gpu
@unittest.skipUnless(mpu.MultiprocessParallelUpdater.available(),
'MultiprocessParallelUpdater is not available.')
def test_cuda_init_forkserver(self):
ret, stdoutdata, stderrdata = _run_test_snippet(
'cuda_init.py', '@cupy:0', 'forkserver')
assert ret == 0, (
'[stdout]:{!r}\n'
'[stderr]:{!r}'.format(stdoutdata, stderrdata))
class TestDevicesByDeviceIds(unittest.TestCase):
@attr.gpu
@unittest.skipUnless(mpu.MultiprocessParallelUpdater.available(),
'MultiprocessParallelUpdater is not available.')
def test_devices_by_device_ids_array(self):
# Test passing devices to MultiprocessParallelUpdater by their ids.
ret, stdoutdata, stderrdata = _run_test_snippet(
'raw_array.py', '0')
assert ret == 0, (
'[stdout]:{!r}\n'
'[stderr]:{!r}'.format(stdoutdata, stderrdata))
testing.run_module(__name__, __file__)
| 7,820
| 33.152838
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/updaters_tests/test_standard_updater.py
|
import contextlib
import unittest
import mock
import numpy
import pytest
import chainer
from chainer import backend
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer import dataset
from chainer import testing
from chainer.testing import attr
from chainer import training
class DummyIterator(dataset.Iterator):
epoch = 1
is_new_epoch = True
def __init__(self, next_data):
self.finalize_called = 0
self.next_called = 0
self.next_data = next_data
self.serialize_called = []
def finalize(self):
self.finalize_called += 1
def __next__(self):
self.next_called += 1
return self.next_data
def serialize(self, serializer):
self.serialize_called.append(serializer)
class DummyOptimizer(chainer.Optimizer):
def __init__(self):
self.update = mock.MagicMock()
self.serialize_called = []
def serialize(self, serializer):
self.serialize_called.append(serializer)
class DummySerializer(chainer.Serializer):
def __init__(self, path=None):
if path is None:
path = []
self.path = path
self.called = []
def __getitem__(self, key):
return DummySerializer(self.path + [key])
def __call__(self, key, value):
self.called.append((key, value))
class TestStandardUpdater(unittest.TestCase):
def setUp(self):
self.target = chainer.Link()
self.iterator = DummyIterator([(numpy.array(1), numpy.array(2))])
self.optimizer = DummyOptimizer()
self.optimizer.setup(self.target)
self.updater = training.updaters.StandardUpdater(
self.iterator, self.optimizer)
def test_init_values(self):
assert self.updater.device is None
assert self.updater.input_device is None
assert self.updater.loss_func is None
assert self.updater.iteration == 0
def test_epoch(self):
assert self.updater.epoch == 1
def test_new_epoch(self):
assert self.updater.is_new_epoch is True
def test_get_iterator(self):
assert self.updater.get_iterator('main') is self.iterator
def test_get_optimizer(self):
assert self.updater.get_optimizer('main') is self.optimizer
def test_get_all_optimizers(self):
assert self.updater.get_all_optimizers() == {'main': self.optimizer}
def test_update(self):
self.updater.update()
assert self.updater.iteration == 1
assert self.optimizer.epoch == 1
assert self.iterator.next_called == 1
def test_use_auto_new_epoch(self):
assert self.optimizer.use_auto_new_epoch is True
def test_finalizer(self):
self.updater.finalize()
assert self.iterator.finalize_called == 1
def test_serialize(self):
serializer = DummySerializer()
self.updater.serialize(serializer)
assert len(self.iterator.serialize_called) == 1
assert self.iterator.serialize_called[0].path == ['iterator:main']
assert len(self.optimizer.serialize_called) == 1
assert self.optimizer.serialize_called[0].path == ['optimizer:main']
assert serializer.called == [('iteration', 0)]
_backend_params = [
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
@chainer.testing.backend.inject_backend_tests(None, _backend_params)
@chainer.testing.backend.inject_backend_tests(None, _backend_params)
@chainer.testing.backend.inject_backend_tests(None, _backend_params)
class TestStandardUpdaterDevice(unittest.TestCase):
def test_device(
self, model_initial_backend_config, model_backend_config,
input_backend_config):
model_initial_device = model_initial_backend_config.device
device = model_backend_config.device
input_device = input_backend_config.device
model = chainer.Link()
model.to_device(model_initial_device)
optimizer = DummyOptimizer()
optimizer.setup(model)
iterator = DummyIterator([numpy.array(1), numpy.array(2)])
updater = training.updaters.StandardUpdater(
iterator,
optimizer,
device=device,
input_device=input_device)
assert updater.device is device
assert updater.input_device is input_device
# Check the model device.
assert model.device == device
updater.update_core()
assert optimizer.update.call_count == 1
args, kwargs = optimizer.update.call_args
assert len(args) == 2
assert len(kwargs) == 0
loss, v1 = args
# Check the input device.
assert backend.get_device_from_array(v1) == input_device
class DummyDevice(backend.Device):
xp = numpy
supported_array_types = (numpy.ndarray,)
def __init__(self, index):
self.index = index
def __eq__(self, other):
return isinstance(other, DummyDevice) and other.index == self.index
# TODO(niboshi): Define name property instead (#7149).
def __str__(self):
return '@dummy:{}'.format(self.index)
def send_array(self, array):
return array.copy()
@testing.parameterize(*testing.product({
'omit_device': [True, False],
'omit_input_device': [True, False],
}))
class TestStandardUpdaterDeviceArgumentFallback(unittest.TestCase):
"""Tests the fallback behavior regarding device and input_device
arguments."""
def test_device_argument_fallback(self):
self.check_device_argument_fallback(
initial_model_device=DummyDevice(0),
initial_input_device=DummyDevice(1),
device_arg=DummyDevice(3),
input_device_arg=DummyDevice(4))
@attr.multi_gpu(2)
def test_gpu_to_gpu_transfer(self):
initial_model_device = backend.GpuDevice.from_device_id(0)
initial_input_device = backend.GpuDevice.from_device_id(0)
# GpuDevice is given as device/input_device arguments:
# - model GPU-to-GPU transfer should be skipped.
# - input GPU-to-GPU transfer should NOT be skipped.
# device : GPU 1
# input_device: Other device
self.check_device_argument_fallback(
initial_model_device=initial_model_device,
initial_input_device=initial_input_device,
device_arg=backend.GpuDevice.from_device_id(1),
input_device_arg=DummyDevice(0))
# device : GPU 1
# input_device: omitted
self.check_device_argument_fallback(
initial_model_device=initial_model_device,
initial_input_device=initial_input_device,
device_arg=backend.GpuDevice.from_device_id(1),
input_device_arg=None)
# device : Other device
# input_device: GPU 1
self.check_device_argument_fallback(
initial_model_device=initial_model_device,
initial_input_device=initial_input_device,
device_arg=DummyDevice(0),
input_device_arg=backend.GpuDevice.from_device_id(1))
# device : omitted
# input_device: GPU 1
self.check_device_argument_fallback(
initial_model_device=initial_model_device,
initial_input_device=initial_input_device,
device_arg=None,
input_device_arg=backend.GpuDevice.from_device_id(1))
def _get_expected_devices(
self,
initial_model_device,
initial_input_device,
device_arg,
input_device_arg):
# Determines the expected devices.
# Returns: (
# expected_device_attr: Expected StandardUpdater.device
# expected_model_device: Expected model device
# expected_input_device: Expected device given to converters
# )
# or None if an error is expected.
# If device_arg is given and is GpuDevice, it will skip GPU-to-GPU
# transfer of the model (but not input).
if (device_arg is not None
and isinstance(device_arg, backend.GpuDevice)):
if isinstance(initial_model_device, backend.GpuDevice):
expected_model_device = initial_model_device
else:
expected_model_device = device_arg
# If input_device is omitted, device argument should be used.
if input_device_arg is None:
expected_input_device = device_arg
else:
expected_input_device = input_device_arg
expected_device_attr = device_arg
return (
expected_device_attr,
expected_model_device,
expected_input_device)
# expect_table
# Key: (omit_device, omit_input_device)
# Value: (expected_device, expected_input_device)
#
# None means unchanged.
expect_table = {
(0, 0): (device_arg, input_device_arg),
(0, 1): (device_arg, device_arg),
(1, 0): (None, input_device_arg),
(1, 1): (None, None),
}
omit_device = 1 if device_arg is None else 0
omit_input_device = 1 if input_device_arg is None else 0
expected_model_device, expected_input_device = (
expect_table[(
omit_device,
omit_input_device)])
expected_device_attr = expected_model_device
return (
expected_device_attr,
expected_model_device,
expected_input_device)
def check_device_argument_fallback(
self,
initial_model_device,
initial_input_device,
device_arg,
input_device_arg):
if self.omit_device:
device_arg = None
if self.omit_input_device:
input_device_arg = None
actual_converter_device_args = []
@chainer.dataset.converter()
def convert(arr, device):
# The converter records the given device.
actual_converter_device_args.append(device)
if device is None:
return arr
return device.send(arr)
class Model(chainer.Link):
def __init__(self):
chainer.Link.__init__(self)
with self.init_scope():
self.p1 = chainer.Parameter()
self.p2 = chainer.Parameter(
numpy.array([1, 2], numpy.float32))
def forward(self, x):
return chainer.functions.identity(x)
model = Model()
model.to_device(initial_model_device)
optimizer = DummyOptimizer()
optimizer.setup(model)
iterator = DummyIterator([
initial_input_device.send(numpy.array(1)),
])
# Make kwargs
kwargs = {}
if device_arg is not None:
kwargs['device'] = device_arg
if input_device_arg is not None:
kwargs['input_device'] = input_device_arg
# Calculate the expected devices
expect = self._get_expected_devices(
initial_model_device,
initial_input_device,
device_arg,
input_device_arg)
if expect is None:
# Error is expected
with pytest.raises(KeyError):
training.updaters.StandardUpdater(
iterator, optimizer, convert, **kwargs)
return
(expected_device_attr,
expected_model_device,
expected_input_device) = expect
        # A FutureWarning should be raised iff this is a GPU-to-GPU transfer
expect_future_warning = (
device_arg is not None
and initial_model_device.xp is cuda.cupy
and chainer.get_device(device_arg).xp is cuda.cupy
and chainer.get_device(device_arg) != initial_model_device)
if expect_future_warning:
context = testing.assert_warns(FutureWarning)
else:
@contextlib.contextmanager
def _context():
yield
context = _context()
# Create the StandardUpdater
with context:
updater = training.updaters.StandardUpdater(
iterator, optimizer, convert, **kwargs)
assert updater.device == expected_device_attr
assert updater.input_device == expected_input_device
# Check the model device
if expected_model_device is None:
# Model device is unchanged
expected_model_device = initial_model_device
# TODO(niboshi): model.device should be expected_model_device too.
assert model.p1.device == expected_model_device
assert model.p2.device == expected_model_device
# Process a batch
updater.update_core()
# Check the input device given to the converter
assert len(actual_converter_device_args) == 1
assert actual_converter_device_args[0] == expected_input_device
class TestStandardUpdaterDataTypes(unittest.TestCase):
"""Tests several data types with StandardUpdater"""
def setUp(self):
self.target = chainer.Link()
self.optimizer = DummyOptimizer()
self.optimizer.setup(self.target)
def test_update_tuple(self):
iterator = DummyIterator([(numpy.array(1), numpy.array(2))])
updater = training.updaters.StandardUpdater(iterator, self.optimizer)
updater.update_core()
assert self.optimizer.update.call_count == 1
args, kwargs = self.optimizer.update.call_args
assert len(args) == 3
loss, v1, v2 = args
assert len(kwargs) == 0
assert loss is self.optimizer.target
assert isinstance(v1, numpy.ndarray)
assert v1 == 1
assert isinstance(v2, numpy.ndarray)
assert v2 == 2
assert iterator.next_called == 1
def test_update_dict(self):
iterator = DummyIterator([{'x': numpy.array(1), 'y': numpy.array(2)}])
updater = training.updaters.StandardUpdater(iterator, self.optimizer)
updater.update_core()
assert self.optimizer.update.call_count == 1
args, kwargs = self.optimizer.update.call_args
assert len(args) == 1
loss, = args
assert set(kwargs.keys()) == {'x', 'y'}
v1 = kwargs['x']
v2 = kwargs['y']
assert loss is self.optimizer.target
assert isinstance(v1, numpy.ndarray)
assert v1 == 1
assert isinstance(v2, numpy.ndarray)
assert v2 == 2
assert iterator.next_called == 1
def test_update_var(self):
iterator = DummyIterator([numpy.array(1)])
updater = training.updaters.StandardUpdater(iterator, self.optimizer)
updater.update_core()
assert self.optimizer.update.call_count == 1
args, kwargs = self.optimizer.update.call_args
assert len(args) == 2
loss, v1 = args
assert len(kwargs) == 0
assert loss is self.optimizer.target
assert isinstance(v1, numpy.ndarray)
assert v1 == 1
assert iterator.next_called == 1
@testing.parameterize(
{'converter_style': 'old'},
{'converter_style': 'decorator'},
{'converter_style': 'class'})
@chainer.testing.backend.inject_backend_tests(
['test_converter_given_device'],
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# Custom converter is not supported for ChainerX.
])
class TestStandardUpdaterCustomConverter(unittest.TestCase):
"""Tests custom converters of various specs"""
def create_optimizer(self):
target = chainer.Link()
optimizer = DummyOptimizer()
optimizer.setup(target)
return optimizer
def create_updater(self, iterator, optimizer, converter, device):
return training.updaters.StandardUpdater(
iterator, optimizer, converter=converter, device=device)
def test_converter_given_device(self, backend_config):
self.check_converter_all(backend_config.device)
def test_converter_given_none(self):
self.check_converter_all(None)
def test_converter_given_int_negative(self):
self.check_converter_all(-1)
@attr.gpu
def test_converter_given_int_positive(self):
self.check_converter_all(9999)
def check_converter_all(self, device):
self.check_converter_in_arrays(device)
self.check_converter_in_obj(device)
self.check_converter_out_tuple(device)
self.check_converter_out_dict(device)
self.check_converter_out_obj(device)
def get_converter(self, converter_func):
if self.converter_style == 'old':
return converter_func
if self.converter_style == 'decorator':
@chainer.dataset.converter()
def wrapped_converter(*args, **kwargs):
return converter_func(*args, **kwargs)
return wrapped_converter
if self.converter_style == 'class':
class MyConverter(dataset.Converter):
def __call__(self, *args, **kwargs):
return converter_func(*args, **kwargs)
return MyConverter()
assert False
def test_converter_type(self):
# Ensures that new-style converters inherit from dataset.Converter.
def converter_impl(batch, device):
pass
converter = self.get_converter(converter_impl)
if self.converter_style in ('decorator', 'class'):
assert isinstance(converter, dataset.Converter)
def check_converter_received_device_arg(
self, received_device_arg, device_arg):
new_style = self.converter_style in ('decorator', 'class')
# None
if device_arg is None:
assert received_device_arg is None
return
# Normalize input device types
is_cpu = False
cuda_device_id = None
if isinstance(device_arg, int):
if device_arg < 0:
is_cpu = True
else:
cuda_device_id = device_arg
elif isinstance(device_arg, _cpu.CpuDevice):
is_cpu = True
elif isinstance(device_arg, cuda.GpuDevice):
cuda_device_id = device_arg.device.id
else:
assert False
# Check received device
if is_cpu:
if new_style:
assert received_device_arg == _cpu.CpuDevice()
else:
assert received_device_arg == -1
elif cuda_device_id is not None:
if new_style:
assert (received_device_arg
== cuda.GpuDevice.from_device_id(cuda_device_id))
else:
assert isinstance(received_device_arg, int)
assert received_device_arg == cuda_device_id
else:
assert new_style
assert received_device_arg is device_arg
def check_converter_in_arrays(self, device_arg):
iterator = DummyIterator([(numpy.array(1), numpy.array(2))])
optimizer = self.create_optimizer()
called = [0]
def converter_impl(batch, device):
self.check_converter_received_device_arg(device, device_arg)
assert isinstance(batch, list)
assert len(batch) == 1
samples = batch[0]
assert isinstance(samples, tuple)
assert len(samples) == 2
assert isinstance(samples[0], numpy.ndarray)
assert isinstance(samples[1], numpy.ndarray)
assert samples[0] == 1
assert samples[1] == 2
called[0] += 1
return samples
converter = self.get_converter(converter_impl)
updater = self.create_updater(
iterator, optimizer, converter, device_arg)
updater.update_core()
assert called[0] == 1
def check_converter_in_obj(self, device_arg):
obj1 = object()
obj2 = object()
iterator = DummyIterator([obj1, obj2])
optimizer = self.create_optimizer()
called = [0]
def converter_impl(batch, device):
self.check_converter_received_device_arg(device, device_arg)
assert isinstance(batch, list)
assert len(batch) == 2
assert batch[0] is obj1
assert batch[1] is obj2
called[0] += 1
return obj1, obj2
converter = self.get_converter(converter_impl)
updater = self.create_updater(
iterator, optimizer, converter, device_arg)
updater.update_core()
assert called[0] == 1
def check_converter_out_tuple(self, device_arg):
iterator = DummyIterator([object()])
optimizer = self.create_optimizer()
converter_out = (object(), object())
def converter_impl(batch, device):
self.check_converter_received_device_arg(device, device_arg)
return converter_out
converter = self.get_converter(converter_impl)
updater = self.create_updater(
iterator, optimizer, converter, device_arg)
updater.update_core()
assert optimizer.update.call_count == 1
args, kwargs = optimizer.update.call_args
assert len(args) == 3
loss, v1, v2 = args
assert len(kwargs) == 0
assert loss is optimizer.target
assert v1 is converter_out[0]
assert v2 is converter_out[1]
def check_converter_out_dict(self, device_arg):
iterator = DummyIterator([object()])
optimizer = self.create_optimizer()
converter_out = {'x': object(), 'y': object()}
def converter_impl(batch, device):
self.check_converter_received_device_arg(device, device_arg)
return converter_out
converter = self.get_converter(converter_impl)
updater = self.create_updater(
iterator, optimizer, converter, device_arg)
updater.update_core()
assert optimizer.update.call_count == 1
args, kwargs = optimizer.update.call_args
assert len(args) == 1
loss, = args
assert len(kwargs) == 2
assert loss is optimizer.target
assert sorted(kwargs.keys()) == ['x', 'y']
assert kwargs['x'] is converter_out['x']
assert kwargs['y'] is converter_out['y']
def check_converter_out_obj(self, device_arg):
iterator = DummyIterator([object()])
optimizer = self.create_optimizer()
converter_out = object()
def converter_impl(batch, device):
self.check_converter_received_device_arg(device, device_arg)
return converter_out
converter = self.get_converter(converter_impl)
updater = self.create_updater(
iterator, optimizer, converter, device_arg)
updater.update_core()
assert optimizer.update.call_count == 1
args, kwargs = optimizer.update.call_args
assert len(args) == 2
loss, v1 = args
assert len(kwargs) == 0
assert loss is optimizer.target
assert v1 is converter_out
testing.run_module(__name__, __file__)
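# --- Illustrative sketch (not part of the original test file) ---
# A minimal new-style converter of the kind exercised above, assuming the
# chainer.dataset.converter() decorator API; StandardUpdater passes a
# chainer.backend.Device object to decorated converters.
import numpy
from chainer import dataset
@dataset.converter()
def stack_and_send(batch, device):
    # Concatenate per-sample arrays into batch arrays on the given device.
    return dataset.concat_examples(batch, device)
# It would be passed to StandardUpdater via the `converter` argument, e.g.:
#   updater = training.updaters.StandardUpdater(
#       iterator, optimizer, converter=stack_and_send, device=device)
batch = [(numpy.zeros(3, numpy.float32), numpy.int32(0)) for _ in range(4)]
x, t = dataset.concat_examples(batch)
assert x.shape == (4, 3) and t.shape == (4,)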
| 23,542
| 31.031293
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/updaters_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/updaters_tests/snippets/cuda_init.py
|
import multiprocessing
import sys
import numpy
import chainer
from chainer.backends import cuda
import chainer.training.updaters.multiprocess_parallel_updater as mpu
class SimpleNetChild(chainer.Chain):
def __init__(self):
super(SimpleNetChild, self).__init__()
with self.init_scope():
self.c1 = chainer.links.Convolution2D(2, 2, 3)
self.fc = chainer.links.Linear(18, 2)
def clear(self):
self.loss = None
def forward(self, x, t):
h = chainer.functions.relu(self.c1(x))
y = self.fc(h)
self.loss = chainer.functions.softmax_cross_entropy(y, t)
return self.loss
def test():
model = SimpleNetChild()
dataset = [((numpy.ones((2, 5, 5)) * i).astype(numpy.float32),
numpy.int32(0)) for i in range(100)]
batch_size = 5
devices = tuple([chainer.get_device(d) for d in sys.argv[1].split(',')])
iters = [chainer.iterators.SerialIterator(i, batch_size) for i in
chainer.datasets.split_dataset_n_random(
dataset, len(devices))]
optimizer = chainer.optimizers.SGD(lr=1.0)
optimizer.setup(model)
# Initialize CUDA context.
cuda.cupy.cuda.runtime.runtimeGetVersion()
try:
mpu.MultiprocessParallelUpdater(iters, optimizer, devices=devices)
except RuntimeError as e:
if sys.argv[2] == 'fork':
assert 'CUDA context' in str(e)
return
updater = mpu.MultiprocessParallelUpdater(
iters, optimizer, devices=devices)
trainer = chainer.training.Trainer(updater, (1, 'epoch'), '/tmp')
trainer.run()
assert sys.argv[2] != 'fork'
if __name__ == '__main__':
multiprocessing.set_start_method(sys.argv[2])
test()
# This snippet is not test code.
# testing.run_module(__name__, __file__)
| 1,831
| 25.550725
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/updaters_tests/snippets/raw_array.py
|
import sys
import numpy
import chainer
from chainer import testing
import chainer.training.updaters.multiprocess_parallel_updater as mpu
class SimpleNetRawArray(chainer.Chain):
def __init__(self):
super(SimpleNetRawArray, self).__init__()
with self.init_scope():
self.conv = chainer.links.Convolution2D(2, 2, 3)
self.fc = chainer.links.Linear(18, 2)
self.train = True
self.call_called = 0
def clear(self):
self.loss = None
self.accuracy = None
def forward(self, x, t):
assert not isinstance(x, chainer.Variable)
assert not isinstance(t, chainer.Variable)
self.call_called += 1
h = chainer.functions.relu(self.conv(x))
y = self.fc(h)
self.loss = chainer.functions.softmax_cross_entropy(y, t)
self.accuracy = chainer.functions.accuracy(y, t)
return self.loss
def test():
model = SimpleNetRawArray()
dataset = [((numpy.ones((2, 5, 5)) * i).astype(numpy.float32),
numpy.int32(0)) for i in range(100)]
batch_size = 5
devices = tuple([chainer.get_device(d) for d in sys.argv[1].split(',')])
iters = [chainer.iterators.SerialIterator(i, batch_size) for i in
chainer.datasets.split_dataset_n_random(
dataset, len(devices))]
optimizer = chainer.optimizers.SGD(lr=1.0)
optimizer.setup(model)
with testing.assert_warns(UserWarning):
updater = mpu.MultiprocessParallelUpdater(
iters, optimizer, devices=devices)
updater.update()
assert model.call_called == 1
if __name__ == '__main__':
test()
# This snippet is not test code.
# testing.run_module(__name__, __file__)
| 1,736
| 24.925373
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/updaters_tests/snippets/child_reporter.py
|
import sys
import numpy
import chainer
from chainer.training import trainer
import chainer.training.updaters.multiprocess_parallel_updater as mpu
class SimpleNetChild(chainer.Chain):
def __init__(self):
super(SimpleNetChild, self).__init__()
with self.init_scope():
self.conv = chainer.links.Convolution2D(2, 2, 3)
def forward(self, x):
h = chainer.functions.relu(self.conv(x))
chainer.reporter.report({
'h_max': chainer.functions.math.minmax.max(h)}, self)
return h
class SimpleNetChildReporter(chainer.Chain):
def __init__(self):
super(SimpleNetChildReporter, self).__init__()
with self.init_scope():
self.c1 = SimpleNetChild()
self.fc = chainer.links.Linear(18, 2)
self.call_called = 0
def clear(self):
self.loss = None
def forward(self, x, t):
self.call_called += 1
h = chainer.functions.relu(self.c1(x))
y = self.fc(h)
self.loss = chainer.functions.softmax_cross_entropy(y, t)
chainer.reporter.report({'loss': self.loss}, self)
return self.loss
if __name__ == '__main__':
model = SimpleNetChildReporter()
dataset = [(numpy.full((2, 5, 5), i, numpy.float32),
numpy.int32(0)) for i in range(100)]
batch_size = 5
devices = tuple([chainer.get_device(d) for d in sys.argv[1].split(',')])
iters = [chainer.iterators.SerialIterator(i, batch_size) for i in
chainer.datasets.split_dataset_n_random(
dataset, len(devices))]
optimizer = chainer.optimizers.SGD(lr=1.0)
optimizer.setup(model)
updater = mpu.MultiprocessParallelUpdater(
iters, optimizer, devices=devices)
trainer = trainer.Trainer(updater, (1, 'iteration'), '/tmp')
trainer.run()
assert model.call_called == 1
# This snippet is not test code.
# testing.run_module(__name__, __file__)
| 1,953
| 25.767123
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/triggers_tests/test_interval_trigger.py
|
from __future__ import division
import random
import tempfile
import unittest
import numpy as np
from chainer import serializers
from chainer import testing
from chainer.testing import condition
from chainer import training
@testing.parameterize(
# iteration
{
'iter_per_epoch': 5, 'interval': (2, 'iteration'), 'resume': 4,
'expected': [False, True, False, True, False, True, False]},
# basic epoch
{
'iter_per_epoch': 1, 'interval': (3, 'epoch'), 'resume': 4,
'expected': [False, False, True, False, False, True, False]},
# fractional epoch
{
'iter_per_epoch': 2, 'interval': (1.5, 'epoch'), 'resume': 4,
'expected': [False, False, True, False, False, True, False]},
# unaligned epoch
{
'iter_per_epoch': 2.5, 'interval': (1, 'epoch'), 'resume': 3,
'expected': [False, False, True, False, True, False, False]},
# tiny epoch
{
'iter_per_epoch': 0.5, 'interval': (1, 'epoch'), 'resume': 4,
'expected': [True, True, True, True, True, True, True]},
)
class TestIntervalTrigger(unittest.TestCase):
def test_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.IntervalTrigger(*self.interval)
# before the first iteration, trigger should be False
for expected in [False] + self.expected:
self.assertEqual(trigger(trainer), expected)
trainer.updater.update()
def test_resumed_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.IntervalTrigger(*self.interval)
for expected in self.expected[:self.resume]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
serializers.save_npz(f.name, trigger)
trigger = training.triggers.IntervalTrigger(*self.interval)
serializers.load_npz(f.name, trigger)
for expected in self.expected[self.resume:]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
@condition.repeat(10)
def test_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.IntervalTrigger(*self.interval)
accumulated = False
# before the first iteration, trigger should be False
for expected in [False] + self.expected:
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
trainer.updater.update()
@condition.repeat(10)
def test_resumed_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
accumulated = False
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.IntervalTrigger(*self.interval)
for expected in self.expected[:self.resume]:
trainer.updater.update()
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
serializers.save_npz(f.name, trigger)
trigger = training.triggers.IntervalTrigger(*self.interval)
serializers.load_npz(f.name, trigger)
for expected in self.expected[self.resume:]:
trainer.updater.update()
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
def test_resumed_trigger_backward_compat(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.IntervalTrigger(*self.interval)
for expected in self.expected[:self.resume]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
# old version does not save anything
np.savez(f, dummy=0)
trigger = training.triggers.IntervalTrigger(*self.interval)
with testing.assert_warns(UserWarning):
serializers.load_npz(f.name, trigger)
for expected in self.expected[self.resume:]:
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
def test_str(self):
trigger = training.triggers.IntervalTrigger(*self.interval)
expected = 'IntervalTrigger({}, \'{}\')'.format(*self.interval)
actual = str(trigger)
assert expected == actual, 'Expected "{}" == "{}"'.format(
expected, actual)
class TestInvalidIntervalTrigger(unittest.TestCase):
def test_invalid_unit(self):
with self.assertRaises(ValueError):
training.triggers.IntervalTrigger(1, 'day')
testing.run_module(__name__, __file__)
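# --- Illustrative sketch (not part of the original test file) ---
# Manual use of IntervalTrigger with the mock-updater helper used above:
# the trigger fires once every two iterations.
from chainer import testing
from chainer import training
trainer = testing.get_trainer_with_mock_updater(
    stop_trigger=None, iter_per_epoch=5)
trigger = training.triggers.IntervalTrigger(2, 'iteration')
fired = []
for _ in range(6):
    trainer.updater.update()
    fired.append(bool(trigger(trainer)))
assert fired == [False, True, False, True, False, True]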
| 5,498
| 38.847826
| 71
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/triggers_tests/test_once_trigger.py
|
from __future__ import division
import random
import tempfile
import unittest
import numpy as np
from chainer import serializers
from chainer import testing
from chainer.testing import condition
from chainer import training
@testing.parameterize(
# basic
{
'iter_per_epoch': 5, 'call_on_resume': False, 'resume': 4},
# call on resume
{
'iter_per_epoch': 5, 'call_on_resume': True, 'resume': 4},
# unaligned epoch
{
'iter_per_epoch': 2.5, 'call_on_resume': False, 'resume': 3},
# unaligned epoch, call on resume
{
'iter_per_epoch': 2.5, 'call_on_resume': True, 'resume': 3},
# tiny epoch
{
'iter_per_epoch': 0.5, 'call_on_resume': False, 'resume': 4},
# tiny epoch, call on resume
{
'iter_per_epoch': 0.5, 'call_on_resume': True, 'resume': 4},
)
class TestOnceTrigger(unittest.TestCase):
expected = [True] + [False] * 6
finished = [False] + [True] * 6
def setUp(self):
self.resumed_expected = [True] + [False] * 6
self.resumed_finished = [False] + [True] * 6
if self.call_on_resume:
self.resumed_expected[self.resume] = True
self.resumed_finished[self.resume] = False
def test_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.OnceTrigger(self.call_on_resume)
for expected, finished in zip(self.expected, self.finished):
self.assertEqual(trigger.finished, finished)
self.assertEqual(trigger(trainer), expected)
trainer.updater.update()
def test_resumed_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.OnceTrigger(self.call_on_resume)
for expected, finished in zip(self.resumed_expected[:self.resume],
self.resumed_finished[:self.resume]):
trainer.updater.update()
self.assertEqual(trigger.finished, finished)
self.assertEqual(trigger(trainer), expected)
serializers.save_npz(f.name, trigger)
trigger = training.triggers.OnceTrigger(self.call_on_resume)
serializers.load_npz(f.name, trigger)
for expected, finished in zip(self.resumed_expected[self.resume:],
self.resumed_finished[self.resume:]):
trainer.updater.update()
self.assertEqual(trigger.finished, finished)
self.assertEqual(trigger(trainer), expected)
@condition.repeat(10)
def test_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.OnceTrigger(self.call_on_resume)
accumulated = False
accumulated_finished = True
for expected, finished in zip(self.expected, self.finished):
accumulated = accumulated or expected
accumulated_finished = accumulated_finished and finished
if random.randrange(2):
self.assertEqual(trigger.finished, accumulated_finished)
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
accumulated_finished = True
trainer.updater.update()
@condition.repeat(10)
def test_resumed_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
accumulated = False
accumulated_finished = True
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.OnceTrigger(self.call_on_resume)
for expected, finished in zip(self.resumed_expected[:self.resume],
self.resumed_finished[:self.resume]):
trainer.updater.update()
accumulated = accumulated or expected
accumulated_finished = accumulated_finished and finished
if random.randrange(2):
self.assertEqual(trigger.finished, accumulated_finished)
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
accumulated_finished = True
serializers.save_npz(f.name, trigger)
trigger = training.triggers.OnceTrigger(self.call_on_resume)
serializers.load_npz(f.name, trigger)
for expected, finished in zip(self.resumed_expected[self.resume:],
self.resumed_finished[self.resume:]):
trainer.updater.update()
accumulated = accumulated or expected
accumulated_finished = accumulated_finished and finished
if random.randrange(2):
self.assertEqual(trigger.finished, accumulated_finished)
self.assertEqual(trigger(trainer), accumulated)
accumulated = False
accumulated_finished = True
def test_resumed_trigger_backward_compat(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.OnceTrigger(self.call_on_resume)
for expected, finished in zip(self.resumed_expected[:self.resume],
self.resumed_finished[:self.resume]):
trainer.updater.update()
self.assertEqual(trigger.finished, finished)
self.assertEqual(trigger(trainer), expected)
# old version does not save anything
np.savez(f, dummy=0)
trigger = training.triggers.OnceTrigger(self.call_on_resume)
with testing.assert_warns(UserWarning):
serializers.load_npz(f.name, trigger)
for expected, finished in zip(self.resumed_expected[self.resume:],
self.resumed_finished[self.resume:]):
trainer.updater.update()
self.assertEqual(trigger.finished, finished)
self.assertEqual(trigger(trainer), expected)
testing.run_module(__name__, __file__)
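# --- Illustrative sketch (not part of the original test file) ---
# OnceTrigger fires only on its first invocation; afterwards `finished`
# becomes True and further calls return False, as the tests above check.
from chainer import testing
from chainer import training
trainer = testing.get_trainer_with_mock_updater(
    stop_trigger=None, iter_per_epoch=5)
trigger = training.triggers.OnceTrigger()
assert not trigger.finished
assert trigger(trainer)
assert trigger.finished
trainer.updater.update()
assert not trigger(trainer)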
| 6,616
| 43.113333
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/triggers_tests/test_manual_schedule_trigger.py
|
from __future__ import division
import random
import tempfile
import unittest
import numpy as np
import six
from chainer import serializers
from chainer import testing
from chainer.testing import condition
from chainer import training
def expected_finished(pos, num):
return [i >= pos for i in six.moves.range(num)]
@testing.parameterize(
# single iteration
{
'iter_per_epoch': 2, 'schedule': (2, 'iteration'), 'resume': 3,
'expected': [False, True, False, False, False, False, False],
'finished': expected_finished(1, 7)},
# multiple iteration
{
'iter_per_epoch': 2, 'schedule': ([2, 4], 'iteration'), 'resume': 3,
'expected': [False, True, False, True, False, False, False],
'finished': expected_finished(3, 7)},
# single epoch
{
'iter_per_epoch': 3, 'schedule': (1, 'epoch'), 'resume': 3,
'expected': [False, False, True, False, False, False, False],
'finished': expected_finished(2, 7)},
# multiple epoch
{
'iter_per_epoch': 3, 'schedule': ([1, 2], 'epoch'), 'resume': 4,
'expected': [False, False, True, False, False, True, False],
'finished': expected_finished(5, 7)},
# single fractional epoch
{
'iter_per_epoch': 2, 'schedule': (1.5, 'epoch'), 'resume': 4,
'expected': [False, False, True, False, False, False, False],
'finished': expected_finished(2, 7)},
# multiple fractional epoch
{
'iter_per_epoch': 2, 'schedule': ([1.5, 2.5], 'epoch'), 'resume': 4,
'expected': [False, False, True, False, True, False, False],
'finished': expected_finished(4, 7)},
# single unaligned epoch
{
'iter_per_epoch': 2.5, 'schedule': (1, 'epoch'), 'resume': 4,
'expected': [False, False, True, False, False, False, False],
'finished': expected_finished(2, 7)},
# multiple unaligned epoch
{
'iter_per_epoch': 2.5, 'schedule': ([1, 2], 'epoch'), 'resume': 4,
'expected': [False, False, True, False, True, False, False],
'finished': expected_finished(4, 7)},
# single tiny epoch
{
'iter_per_epoch': 0.5, 'schedule': (1, 'epoch'), 'resume': 4,
'expected': [True, False, False, False, False, False, False],
'finished': expected_finished(0, 7)},
# multiple tiny epoch
{
'iter_per_epoch': 0.5, 'schedule': ([1, 2], 'epoch'), 'resume': 4,
'expected': [True, False, False, False, False, False, False],
'finished': expected_finished(0, 7)},
)
class TestManualScheduleTrigger(unittest.TestCase):
def test_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
for expected, finished in zip(self.expected, self.finished):
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
self.assertEqual(trigger.finished, finished)
def test_resumed_trigger(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
for expected, finished in zip(self.expected[:self.resume],
self.finished[:self.resume]):
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
self.assertEqual(trigger.finished, finished)
serializers.save_npz(f.name, trigger)
trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
serializers.load_npz(f.name, trigger)
for expected, finished in zip(self.expected[self.resume:],
self.finished[self.resume:]):
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
self.assertEqual(trigger.finished, finished)
@condition.repeat(10)
def test_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
accumulated = False
for expected, finished in zip(self.expected, self.finished):
trainer.updater.update()
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
self.assertEqual(trigger.finished, finished)
accumulated = False
@condition.repeat(10)
def test_resumed_trigger_sparse_call(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
accumulated = False
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
for expected, finished in zip(self.expected[:self.resume],
self.finished[:self.resume]):
trainer.updater.update()
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
self.assertEqual(trigger.finished, finished)
accumulated = False
serializers.save_npz(f.name, trigger)
trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
serializers.load_npz(f.name, trigger)
for expected, finished in zip(self.expected[self.resume:],
self.finished[self.resume:]):
trainer.updater.update()
accumulated = accumulated or expected
if random.randrange(2):
self.assertEqual(trigger(trainer), accumulated)
self.assertEqual(trigger.finished, finished)
accumulated = False
def test_resumed_trigger_backward_compat(self):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
with tempfile.NamedTemporaryFile(delete=False) as f:
trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
for expected, finished in zip(self.expected[:self.resume],
self.finished[:self.resume]):
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
self.assertEqual(trigger.finished, finished)
# old version does not save anything
np.savez(f, dummy=0)
trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
with testing.assert_warns(UserWarning):
serializers.load_npz(f.name, trigger)
for expected, finished in zip(self.expected[self.resume:],
self.finished[self.resume:]):
trainer.updater.update()
self.assertEqual(trigger(trainer), expected)
self.assertEqual(trigger.finished, finished)
class TestInvalidManualScheduleTrigger(unittest.TestCase):
def test_invalid_unit(self):
with self.assertRaises(ValueError):
training.triggers.ManualScheduleTrigger(1, 'day')
testing.run_module(__name__, __file__)
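# --- Illustrative sketch (not part of the original test file) ---
# ManualScheduleTrigger fires exactly at the listed points; here at
# iterations 2 and 4, matching the 'multiple iteration' case above.
from chainer import testing
from chainer import training
trainer = testing.get_trainer_with_mock_updater(
    stop_trigger=None, iter_per_epoch=2)
trigger = training.triggers.ManualScheduleTrigger([2, 4], 'iteration')
fired = []
for _ in range(5):
    trainer.updater.update()
    fired.append(bool(trigger(trainer)))
assert fired == [False, True, False, True, False]
assert trigger.finished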
| 7,675
| 42.613636
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/triggers_tests/test_minmax_trigger.py
|
import tempfile
import unittest
from chainer import serializers
from chainer import testing
from chainer.training import triggers
class BestValueTriggerTester(object):
def _test_trigger(self, trigger, key, accuracies, expected,
resume=None, save=None):
trainer = testing.get_trainer_with_mock_updater(
stop_trigger=(len(accuracies), 'iteration'),
iter_per_epoch=self.iter_per_epoch)
updater = trainer.updater
def _serialize_updater(serializer):
updater.iteration = serializer('iteration', updater.iteration)
updater.epoch = serializer('epoch', updater.epoch)
updater.is_new_epoch = serializer(
'is_new_epoch', updater.is_new_epoch)
trainer.updater.serialize = _serialize_updater
def set_observation(t):
t.observation = {key: accuracies[t.updater.iteration-1]}
trainer.extend(set_observation, name='set_observation',
trigger=(1, 'iteration'), priority=2)
invoked_iterations = []
def record(t):
invoked_iterations.append(t.updater.iteration)
trainer.extend(record, name='record', trigger=trigger, priority=1)
if resume is not None:
serializers.load_npz(resume, trainer)
trainer.run()
self.assertEqual(invoked_iterations, expected)
if save is not None:
serializers.save_npz(save, trainer)
def test_trigger(self):
trigger = type(self).trigger_type(self.key, trigger=self.interval)
self._test_trigger(trigger, self.key, self.accuracies, self.expected)
def test_resumed_trigger(self):
trigger = type(self).trigger_type(self.key, trigger=self.interval)
with tempfile.TemporaryFile() as npz:
self._test_trigger(
trigger, self.key, self.accuracies[:self.resume],
self.expected_before_resume, save=npz)
npz.flush()
npz.seek(0)
trigger = type(self).trigger_type(self.key, trigger=self.interval)
self._test_trigger(trigger, self.key, self.accuracies,
self.expected_after_resume, resume=npz)
@testing.parameterize(
    # interval = 1 iteration
{
'key': 'main/accuracy',
'iter_per_epoch': 1,
'interval': (1, 'iteration'),
'accuracies': [0.5, 0.5, 0.4, 0.6],
'expected': [1, 4],
'resume': 1,
'expected_before_resume': [1],
'expected_after_resume': [4]},
# interval = 2 iterations
{
'key': 'main/accuracy',
'iter_per_epoch': 1,
'interval': (2, 'iteration'),
'accuracies': [0.5, 0.5, 0.5, 0.5, 0.4, 0.4, 0.6, 0.6],
'expected': [2, 8],
'resume': 2,
'expected_before_resume': [2],
'expected_after_resume': [8]},
# interval = 2 iterations, unaligned resume
{
'key': 'main/accuracy',
'iter_per_epoch': 1,
'interval': (2, 'iteration'),
'accuracies': [0.5, 0.5, 0.5, 0.5, 0.4, 0.4, 0.6, 0.6],
'expected': [2, 8],
'resume': 3,
'expected_before_resume': [2],
'expected_after_resume': [8]},
# interval = 1 epoch, 1 epoch = 2 iterations
{
'key': 'main/accuracy',
'iter_per_epoch': 2,
'interval': (1, 'epoch'),
'accuracies': [0.5, 0.5, 0.5, 0.5, 0.4, 0.4, 0.6, 0.6],
'expected': [2, 8],
'resume': 2,
'expected_before_resume': [2],
'expected_after_resume': [8]},
# interval = 1 epoch, 1 epoch = 2 iterations, unaligned resume
{
'key': 'main/accuracy',
'iter_per_epoch': 2,
'interval': (1, 'epoch'),
'accuracies': [0.5, 0.5, 0.5, 0.5, 0.4, 0.4, 0.6, 0.6],
'expected': [2, 8],
'resume': 3,
'expected_before_resume': [2],
'expected_after_resume': [8]},
)
class TestMaxValueTrigger(unittest.TestCase, BestValueTriggerTester):
trigger_type = triggers.MaxValueTrigger
@testing.parameterize(
    # interval = 1 iteration
{
'key': 'main/accuracy',
'iter_per_epoch': 1,
'interval': (1, 'iteration'),
'accuracies': [0.5, 0.5, 0.4, 0.6],
'expected': [1, 3],
'resume': 1,
'expected_before_resume': [1],
'expected_after_resume': [3]},
# interval = 2 iterations
{
'key': 'main/accuracy',
'iter_per_epoch': 1,
'interval': (2, 'iteration'),
'accuracies': [0.5, 0.5, 0.5, 0.5, 0.4, 0.4, 0.6, 0.6],
'expected': [2, 6],
'resume': 2,
'expected_before_resume': [2],
'expected_after_resume': [6]},
# interval = 2 iterations, unaligned resume
{
'key': 'main/accuracy',
'iter_per_epoch': 1,
'interval': (2, 'iteration'),
'accuracies': [0.5, 0.5, 0.5, 0.5, 0.4, 0.4, 0.6, 0.6],
'expected': [2, 6],
'resume': 3,
'expected_before_resume': [2],
'expected_after_resume': [6]},
# interval = 1 epoch, 1 epoch = 2 iterations
{
'key': 'main/accuracy',
'iter_per_epoch': 2,
'interval': (1, 'epoch'),
'accuracies': [0.5, 0.5, 0.5, 0.5, 0.4, 0.4, 0.6, 0.6],
'expected': [2, 6],
'resume': 2,
'expected_before_resume': [2],
'expected_after_resume': [6]},
# interval = 1 epoch, 1 epoch = 2 iterations, unaligned resume
{
'key': 'main/accuracy',
'iter_per_epoch': 2,
'interval': (1, 'epoch'),
'accuracies': [0.5, 0.5, 0.5, 0.5, 0.4, 0.4, 0.6, 0.6],
'expected': [2, 6],
'resume': 3,
'expected_before_resume': [2],
'expected_after_resume': [6]},
)
class TestMinValueTrigger(unittest.TestCase, BestValueTriggerTester):
trigger_type = triggers.MinValueTrigger
testing.run_module(__name__, __file__)
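# --- Illustrative sketch (not part of the original test file) ---
# Typical use of MaxValueTrigger outside the tests: take a snapshot whenever
# the monitored value reaches a new maximum. The observation key
# 'validation/main/accuracy' is only an assumed example.
from chainer.training import extensions
from chainer.training import triggers
def add_best_snapshot(trainer):
    trainer.extend(
        extensions.snapshot(filename='best_model.npz'),
        trigger=triggers.MaxValueTrigger(
            'validation/main/accuracy', trigger=(1, 'epoch')))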
| 5,906
| 32.948276
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/triggers_tests/test_time_trigger.py
|
import io
import unittest
import chainer
from chainer import testing
class DummyTrainer(object):
def __init__(self):
self.elapsed_time = 0
class TestTimeTrigger(unittest.TestCase):
def setUp(self):
self.trigger = chainer.training.triggers.TimeTrigger(1)
self.trainer = DummyTrainer()
def test_call(self):
assert not self.trigger(self.trainer)
self.trainer.elapsed_time = 0.9
assert not self.trigger(self.trainer)
        # the first event is triggered at time==1.0
self.trainer.elapsed_time = 1.2
assert self.trigger(self.trainer)
self.trainer.elapsed_time = 1.3
assert not self.trigger(self.trainer)
        # the second event is triggered at time==2.0, not at time==2.2
self.trainer.elapsed_time = 2.1
assert self.trigger(self.trainer)
def test_resume(self):
self.trainer.elapsed_time = 1.2
self.trigger(self.trainer)
assert self.trigger._next_time == 2.0
f = io.BytesIO()
chainer.serializers.save_npz(f, self.trigger)
trigger = chainer.training.triggers.TimeTrigger(1)
chainer.serializers.load_npz(io.BytesIO(f.getvalue()), trigger)
assert trigger._next_time == 2.0
testing.run_module(__name__, __file__)
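# --- Illustrative sketch (not part of the original test file) ---
# TimeTrigger fires once per `period` seconds of the trainer's elapsed
# time; a dummy trainer object is enough to drive it, as above.
import chainer
class _Trainer(object):
    elapsed_time = 0.0
t = _Trainer()
trigger = chainer.training.triggers.TimeTrigger(1)
assert not trigger(t)
t.elapsed_time = 1.2
assert trigger(t)
assert not trigger(t)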
| 1,301
| 25.04
| 74
|
py
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/triggers_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/training_tests/triggers_tests/test_early_stopping_trigger.py
|
import unittest
import numpy
import pytest
import chainer
from chainer import testing
from chainer.training import triggers
from chainer.training import util
def _test_trigger(self, trigger, key, accuracies, expected):
trainer = testing.training.get_trainer_with_mock_updater(
stop_trigger=None, iter_per_epoch=1)
for accuracy, expected in zip(accuracies, expected):
trainer.updater.update()
trainer.observation = {key: accuracy}
self.assertEqual(trigger(trainer), expected)
class TestEarlyStoppingTrigger(unittest.TestCase):
def test_early_stopping_trigger_with_accuracy(self):
key = 'main/accuracy'
trigger = triggers.EarlyStoppingTrigger(monitor=key, patience=3,
check_trigger=(1, 'epoch'),
verbose=False)
trigger = util.get_trigger(trigger)
accuracies = [0.5, 0.5, 0.6, 0.7, 0.6, 0.4, 0.3, 0.2]
accuracies = numpy.asarray([
chainer.Variable(numpy.asarray(acc, dtype=numpy.float32))
for acc in accuracies])
expected = [False, False, False, False, False, False, True, True]
_test_trigger(self, trigger, key, accuracies, expected)
def test_early_stopping_trigger_with_loss(self):
key = 'main/loss'
trigger = triggers.EarlyStoppingTrigger(monitor=key, patience=3,
check_trigger=(1, 'epoch'))
trigger = util.get_trigger(trigger)
accuracies = [100, 80, 30, 10, 20, 24, 30, 35]
accuracies = numpy.asarray([
chainer.Variable(numpy.asarray(acc, dtype=numpy.float32))
for acc in accuracies])
expected = [False, False, False, False, False, False, True, True]
_test_trigger(self, trigger, key, accuracies, expected)
def test_early_stopping_trigger_with_max_epoch(self):
key = 'main/loss'
trigger = triggers.EarlyStoppingTrigger(monitor=key, patience=3,
check_trigger=(1, 'epoch'),
max_trigger=(3, 'epoch'))
trigger = util.get_trigger(trigger)
accuracies = [100, 80, 30]
accuracies = numpy.asarray([
chainer.Variable(numpy.asarray(acc, dtype=numpy.float32))
for acc in accuracies])
expected = [False, False, True]
_test_trigger(self, trigger, key, accuracies, expected)
def test_early_stopping_trigger_with_max_iteration(self):
key = 'main/loss'
trigger = triggers.EarlyStoppingTrigger(monitor=key, patience=3,
check_trigger=(1, 'epoch'),
max_trigger=(3, 'iteration'))
trigger = util.get_trigger(trigger)
accuracies = [100, 80, 30]
accuracies = numpy.asarray([
chainer.Variable(numpy.asarray(acc, dtype=numpy.float32))
for acc in accuracies])
expected = [False, False, True]
_test_trigger(self, trigger, key, accuracies, expected)
class TestEarlyStoppingTriggerPatienceAlias(unittest.TestCase):
"""Tests the alias argument `patients`."""
def test_alias(self):
# By keyword
trigger = triggers.EarlyStoppingTrigger(patients=10)
assert trigger.patience == 10
# By positional
trigger = triggers.EarlyStoppingTrigger((1, 'epoch'), 'main/loss', 10)
assert trigger.patience == 10
# Duplicated, by keyword
with pytest.raises(TypeError):
triggers.EarlyStoppingTrigger(patience=10, patients=3)
# Duplicated, by positional
with pytest.raises(TypeError):
triggers.EarlyStoppingTrigger(
(1, 'epoch'), 'main/loss', 10, patients=3)
testing.run_module(__name__, __file__)
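# --- Illustrative sketch (not part of the original test file) ---
# In training scripts EarlyStoppingTrigger is normally used as the Trainer's
# stop trigger rather than called by hand; `updater` is a placeholder.
from chainer import training
from chainer.training import triggers
def make_trainer(updater, out='result'):
    stop_trigger = triggers.EarlyStoppingTrigger(
        monitor='validation/main/loss', patience=3,
        check_trigger=(1, 'epoch'), max_trigger=(100, 'epoch'))
    return training.Trainer(updater, stop_trigger, out=out)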
| 3,914
| 35.588785
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/link_hooks_tests/test_spectral_normalization.py
|
import copy
import os
import unittest
import numpy
import pytest
import chainer
from chainer.backends import _cpu
from chainer.link_hooks.spectral_normalization import SpectralNormalization
import chainer.links as L
from chainer import serializers
from chainer import testing
from chainer.testing import attr
from chainer.testing.backend import BackendConfig
from chainer import utils
class TestExceptions(unittest.TestCase):
def setUp(self):
self.x = chainer.Variable(numpy.ones((10, 5), dtype=numpy.float32))
self.layer = L.Linear(5, 20)
def test_wrong_weight_name(self):
        wrong_weight_name = 'w'
        hook = SpectralNormalization(weight_name=wrong_weight_name)
with pytest.raises(ValueError):
self.layer.add_hook(hook)
def test_raises(self):
with pytest.raises(NotImplementedError):
with SpectralNormalization():
self.layer(self.x)
def test_invalid_shaped_weight(self):
with pytest.raises(ValueError):
L.Linear(10, 0).add_hook(SpectralNormalization())
class BaseTest(object):
def test_add_sn_hook(self):
layer, hook = self.layer, self.hook
layer.add_hook(hook)
if self.lazy_init:
assert not hasattr(layer, hook.vector_name)
if self.use_gamma:
assert not hasattr(layer, 'gamma')
with chainer.using_config('train', False):
layer(self.x)
assert hasattr(layer, hook.vector_name)
assert (self.out_size,) == getattr(layer, hook.vector_name).shape
if not self.use_gamma:
assert not hasattr(layer, 'gamma')
else: # Use gamma parameter
assert hasattr(layer, 'gamma')
assert layer.gamma.ndim == 0 and layer.gamma.size == 1
def _init_layer(self):
hook = SpectralNormalization(use_gamma=self.use_gamma)
layer = self.layer
layer.add_hook(hook)
if self.lazy_init:
# Initialize weight and bias.
with chainer.using_config('train', False):
layer(self.x)
return layer, hook
def check_weight_is_parameter(self, backend_config):
layer, hook = self._init_layer()
layer.to_device(backend_config.device)
source_weight = getattr(layer, hook.weight_name)
x = backend_config.get_array(self.x)
layer(x)
assert getattr(layer, hook.weight_name) is source_weight
def test_weight_is_parameter(self, backend_config):
if not self.lazy_init:
self.check_weight_is_parameter(backend_config)
def check_in_recomputing(self, backend_config):
layer, hook = self.layer, self.hook
layer.add_hook(hook)
layer.to_device(backend_config.device)
x = backend_config.get_array(self.x)
y1 = layer(x).array
u1 = getattr(layer, hook.vector_name).copy()
v1 = hook.v.copy()
with chainer.using_config('in_recomputing', True):
y2 = layer(x).array
u2 = getattr(layer, hook.vector_name)
v2 = hook.v
u1, u2 = _cpu._to_cpu(u1), _cpu._to_cpu(u2)
v1, v2 = _cpu._to_cpu(v1), _cpu._to_cpu(v2)
numpy.testing.assert_array_equal(u1, u2)
numpy.testing.assert_array_equal(v1, v2)
testing.assert_allclose(y1, y2)
def test_in_recomputing(self, backend_config):
if not self.lazy_init:
self.check_in_recomputing(backend_config)
def check_deleted(self, backend_config):
layer, hook = self.layer, self.hook
layer.add_hook(hook)
layer.to_device(backend_config.device)
x = backend_config.get_array(self.x)
with chainer.using_device(backend_config.device):
y1 = layer(x).array
with chainer.using_config('train', False):
y2 = layer(x).array
layer.delete_hook(hook.name)
assert not hasattr(layer, hook.vector_name)
y3 = layer(x).array
y1, y2, y3 = _cpu._to_cpu(y1), _cpu._to_cpu(y2), _cpu._to_cpu(y3)
assert not numpy.array_equal(y1, y3)
assert not numpy.array_equal(y2, y3)
def test_deleted(self, backend_config):
self.check_deleted(backend_config)
def check_u_updated_in_train(self, backend_config):
layer, hook = self.layer, self.hook
layer.add_hook(hook)
layer.to_device(backend_config.device)
x = backend_config.get_array(self.x)
y1 = layer(x).array
u1 = getattr(layer, hook.vector_name).copy()
y2 = layer(x).array
u2 = getattr(layer, hook.vector_name)
y1, y2 = _cpu._to_cpu(y1), _cpu._to_cpu(y2)
u1, u2 = _cpu._to_cpu(u1), _cpu._to_cpu(u2)
assert not numpy.array_equal(u1, u2)
assert not numpy.array_equal(y1, y2)
def test_u_updated_in_train(self, backend_config):
if not self.lazy_init:
self.check_u_updated_in_train(backend_config)
def check_u_not_updated_in_test(self, backend_config):
layer, hook = self.layer, self.hook
layer.add_hook(hook)
layer.to_device(backend_config.device)
x = backend_config.get_array(self.x)
with chainer.using_config('train', False):
y1 = layer(x).array
u1 = getattr(layer, hook.vector_name).copy()
v1 = hook.v.copy()
y2 = layer(x).array
u2 = getattr(layer, hook.vector_name)
v2 = hook.v.copy()
u1, u2 = _cpu._to_cpu(u1), _cpu._to_cpu(u2)
v1, v2 = _cpu._to_cpu(v1), _cpu._to_cpu(v2)
numpy.testing.assert_array_equal(u1, u2)
numpy.testing.assert_array_equal(v1, v2)
testing.assert_allclose(y1, y2)
def test_u_not_updated_in_test(self, backend_config):
if not self.lazy_init:
self.check_u_not_updated_in_test(backend_config)
def check_multi_devices_forward(self, device_0, device_1):
layer, hook = self.layer, self.hook
layer.add_hook(hook)
layer.to_device(device_1)
x = device_1.send(self.x)
msg = None
with chainer.using_device(device_0):
try:
layer(x)
except Exception as e:
msg = e
assert msg is None
@attr.chainerx
@attr.multi_gpu(2)
def test_forward_chx_on_multi_devices(self):
if not self.lazy_init:
device_0 = BackendConfig(
{'use_chainerx': True, 'chainerx_device': 'cuda:0'}).device
device_1 = BackendConfig(
{'use_chainerx': True, 'chainerx_device': 'cuda:1'}).device
self.check_multi_devices_forward(device_0, device_1)
@attr.multi_gpu(2)
def test_forward_multi_gpus(self):
if not self.lazy_init:
device_0 = BackendConfig(
{'use_cuda': True, 'cuda_device': 0}).device
device_1 = BackendConfig(
{'use_cuda': True, 'cuda_device': 1}).device
self.check_multi_devices_forward(device_0, device_1)
def check_serialization(self, backend_config):
with utils.tempdir() as root:
filename = os.path.join(root, 'tmp.npz')
layer1 = self.layer.copy('copy')
hook1 = copy.deepcopy(self.hook)
layer1.add_hook(hook1)
layer1.to_device(backend_config.device)
x = backend_config.get_array(self.x)
with backend_config:
layer1(x)
with chainer.using_config('train', False):
y1 = layer1(x)
serializers.save_npz(filename, layer1)
layer2 = self.layer.copy('copy')
hook2 = copy.deepcopy(self.hook)
layer2.add_hook(hook2)
            # Check that loading succeeds without raising.
msg = None
try:
serializers.load_npz(filename, layer2)
except Exception as e:
msg = e
assert msg is None
with chainer.using_config('train', False):
y2 = layer2(self.x.copy())
# Test attributes are the same.
orig_weight = _cpu._to_cpu(
getattr(layer1, hook1.weight_name).array)
orig_vector = _cpu._to_cpu(getattr(layer1, hook1.vector_name))
numpy.testing.assert_array_equal(
orig_weight, getattr(layer2, hook2.weight_name).array)
numpy.testing.assert_array_equal(
orig_vector, getattr(layer2, hook2.vector_name))
testing.assert_allclose(y1.array, y2.array)
def test_serialization(self, backend_config):
if not self.lazy_init:
self.check_serialization(backend_config)
_inject_backend_tests = testing.inject_backend_tests(
['test_weight_is_parameter', 'test_in_recomputing', 'test_deleted',
'test_u_updated_in_train', 'test_u_not_updated_in_test',
'test_serialization'],
# CPU tests
testing.product({
'use_ideep': ['always', 'never'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
)
@testing.parameterize(*testing.product({
'use_gamma': [True, False],
}))
@_inject_backend_tests
class TestEmbedID(unittest.TestCase, BaseTest):
def setUp(self):
self.lazy_init = False # For convenience.
self.bs, self.in_size, self.out_size = 5, 10, 20
self.x = numpy.arange(self.in_size, dtype=numpy.int32)
self.layer = L.EmbedID(self.in_size, self.out_size)
self.hook = SpectralNormalization(use_gamma=self.use_gamma)
def test_add_sn_hook(self):
hook = SpectralNormalization(use_gamma=self.use_gamma)
layer = self.layer
layer.add_hook(hook)
if self.lazy_init:
assert not hasattr(layer, hook.vector_name)
if self.use_gamma:
assert not hasattr(layer, 'gamma')
with chainer.using_config('train', False):
layer(self.x)
assert hasattr(layer, hook.vector_name)
assert (self.in_size,) == getattr(layer, hook.vector_name).shape
if not self.use_gamma:
assert not hasattr(layer, 'gamma')
else: # Use gamma parameter
assert hasattr(layer, 'gamma')
assert layer.gamma.ndim == 0 and layer.gamma.size == 1
@testing.parameterize(*testing.product({
'lazy_init': [True, False],
'use_gamma': [True, False],
}))
@_inject_backend_tests
class TestLinear(unittest.TestCase, BaseTest):
def setUp(self):
self.bs, self.in_size, self.out_size = 10, 20, 30
self.x = numpy.random.normal(
size=(self.bs, self.in_size)).astype(numpy.float32)
self.layer = L.Linear(self.out_size) # Lazy initialization
in_size = None if self.lazy_init else self.in_size
self.layer = L.Linear(in_size, self.out_size)
self.hook = SpectralNormalization(use_gamma=self.use_gamma)
@testing.parameterize(*testing.product({
'use_gamma': [True, False],
'lazy_init': [True, False],
'link': [L.Convolution1D, L.Deconvolution1D],
}))
@_inject_backend_tests
class TestConvolution1D(unittest.TestCase, BaseTest):
def setUp(self):
self.in_channels, self.out_channels = 3, 10
in_channels = None if self.lazy_init else self.in_channels
conv_init_args = {'ksize': 3, 'stride': 1, 'pad': 1}
self.layer = self.link(
in_channels, self.out_channels, **conv_init_args)
self.x = numpy.random.normal(
size=(5, self.in_channels, 4)).astype(numpy.float32)
self.hook = SpectralNormalization(use_gamma=self.use_gamma)
self.out_size = self.out_channels # For compatibility
@testing.parameterize(*testing.product({
'use_gamma': [True, False],
'lazy_init': [True, False],
'link': [L.Convolution2D, L.Deconvolution2D],
}))
@_inject_backend_tests
class TestConvolution2D(unittest.TestCase, BaseTest):
def setUp(self):
self.in_channels, self.out_channels = 3, 10
in_channels = None if self.lazy_init else self.in_channels
conv_init_args = {'ksize': 3, 'stride': 1, 'pad': 1}
self.layer = self.link(
in_channels, self.out_channels, **conv_init_args)
self.x = numpy.random.normal(
size=(5, self.in_channels, 4, 4)).astype(numpy.float32)
self.hook = SpectralNormalization(use_gamma=self.use_gamma)
self.out_size = self.out_channels # For compatibility
@testing.parameterize(*testing.product({
'use_gamma': [True, False],
'lazy_init': [True, False],
'link': [L.Convolution3D, L.Deconvolution3D],
}))
@_inject_backend_tests
class TestConvolution3D(unittest.TestCase, BaseTest):
def setUp(self):
self.in_channels, self.out_channels = 3, 10
in_channels = None if self.lazy_init else self.in_channels
conv_init_args = {'ksize': 3, 'stride': 1, 'pad': 1}
self.layer = self.link(
in_channels, self.out_channels, **conv_init_args)
self.x = numpy.random.normal(
size=(5, self.in_channels, 4, 4, 4)).astype(numpy.float32)
self.hook = SpectralNormalization(use_gamma=self.use_gamma)
self.out_size = self.out_channels # For compatibility
testing.run_module(__name__, __file__)
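# --- Illustrative sketch (not part of the original test file) ---
# Attaching the SpectralNormalization hook to a link, mirroring the setups
# above: the weight is normalized by its largest singular value on every
# forward pass.
import numpy
import chainer.links as L
from chainer.link_hooks.spectral_normalization import SpectralNormalization
layer = L.Linear(5, 20)
layer.add_hook(SpectralNormalization())
y = layer(numpy.ones((10, 5), dtype=numpy.float32))
assert y.shape == (10, 20)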
| 13,406
| 34.468254
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/link_hooks_tests/test_timer.py
|
import re
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import link_hooks
from chainer import testing
from chainer.testing import attr
class MyModel(chainer.Chain):
def __init__(self):
super(MyModel, self).__init__()
with self.init_scope():
self.l1 = chainer.links.Linear(2, 3)
self.l2 = chainer.links.Linear(3, 4)
def forward(self, x):
h = self.l1(x)
return self.l2(h)
@testing.parameterize(
{'unit': 'sec'},
{'unit': 'ms'},
{'unit': 'us'},
{'unit': 'ns'},
{'unit': 'auto'},
{'unit': 'auto_foreach'},
)
class TestTimerHook(unittest.TestCase):
def test_name(self):
assert link_hooks.TimerHook().name == 'TimerHook'
def check_forward(self, xp):
link = MyModel()
if xp is cuda.cupy:
with testing.assert_warns(DeprecationWarning):
link = link.to_gpu()
hook = link_hooks.TimerHook()
with hook:
link(chainer.Variable(xp.array([[7, 5]], numpy.float32)))
link(chainer.Variable(xp.array([[8, 1]], numpy.float32)))
# call_history
hist = hook.call_history
assert len(hist) == 6
assert all(len(h) == 2 for h in hist)
names = [h[0] for h in hist]
times = [h[1] for h in hist]
assert names == [
'Linear', 'Linear', 'MyModel', 'Linear', 'Linear', 'MyModel']
assert times[0] + times[1] < times[2]
assert times[3] + times[4] < times[5]
# summary
summary = hook.summary()
assert sorted(summary.keys()) == ['Linear', 'MyModel']
assert summary['Linear']['occurrence'] == 4
numpy.testing.assert_allclose(
summary['Linear']['elapsed_time'],
times[0] + times[1] + times[3] + times[4])
assert summary['MyModel']['occurrence'] == 2
numpy.testing.assert_allclose(
summary['MyModel']['elapsed_time'],
times[2] + times[5])
# print_report
s = six.StringIO()
hook.print_report(unit=self.unit, file=s)
report = s.getvalue()
assert len(report.splitlines()) == 3
assert re.search(r'Linear +[.0-9a-z]+ +4', report) is not None
assert re.search(r'MyModel +[.0-9a-z]+ +2', report) is not None
def test_forward_cpu(self):
self.check_forward(numpy)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.cupy)
testing.run_module(__name__, __file__)
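# --- Illustrative sketch (not part of the original test file) ---
# Minimal TimerHook usage matching the behaviour tested above: run links
# inside the hook's context, then print the per-link timing report.
import numpy
import chainer
from chainer import link_hooks
link = chainer.links.Linear(2, 3)
hook = link_hooks.TimerHook()
with hook:
    link(numpy.ones((1, 2), dtype=numpy.float32))
hook.print_report(unit='auto')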
| 2,553
| 27.065934
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/link_hooks_tests/test_weight_standardization.py
|
import unittest
import numpy
import pytest
import chainer
from chainer.backends import cuda
from chainer.link_hooks.weight_standardization import WeightStandardization
import chainer.links as L
from chainer import testing
from chainer.testing import attr
class TestExceptions(unittest.TestCase):
def setUp(self):
self.x = chainer.Variable(numpy.ones((10, 5), dtype=numpy.float32))
self.layer = L.Linear(5, 20)
def test_wrong_weight_name(self):
        wrong_weight_name = 'w'
        hook = WeightStandardization(weight_name=wrong_weight_name)
with pytest.raises(ValueError):
self.layer.add_hook(hook)
def test_raises(self):
with pytest.raises(NotImplementedError):
with WeightStandardization():
self.layer(self.x)
class BaseTest(object):
def test_add_ws_hook(self):
layer, hook = self.layer, self.hook
layer.add_hook(hook)
if self.lazy_init:
with chainer.using_config('train', False):
layer(self.x)
def _init_layer(self):
hook = WeightStandardization()
layer = self.layer
layer.add_hook(hook)
if self.lazy_init:
# Initialize weight and bias.
with chainer.using_config('train', False):
layer(self.x)
return layer, hook
def check_weight_is_parameter(self, gpu):
layer, hook = self._init_layer()
if gpu:
with testing.assert_warns(DeprecationWarning):
layer = layer.to_gpu()
source_weight = getattr(layer, hook.weight_name)
x = cuda.to_gpu(self.x) if gpu else self.x
layer(x)
assert getattr(layer, hook.weight_name) is source_weight
def test_weight_is_parameter_cpu(self):
if not self.lazy_init:
self.check_weight_is_parameter(False)
@attr.gpu
def test_weight_is_parameter_gpu(self):
if not self.lazy_init:
self.check_weight_is_parameter(True)
def check_deleted(self, gpu):
layer, hook = self.layer, self.hook
layer.add_hook(hook)
if gpu:
with testing.assert_warns(DeprecationWarning):
layer = layer.to_gpu()
x = cuda.to_gpu(self.x) if gpu else self.x
y1 = layer(x).array
with chainer.using_config('train', False):
y2 = layer(x).array
layer.delete_hook(hook.name)
y3 = layer(x).array
if gpu:
y1, y2, y3 = cuda.to_cpu(y1), cuda.to_cpu(y2), cuda.to_cpu(y3)
assert not numpy.array_equal(y1, y3)
assert not numpy.array_equal(y2, y3)
def test_deleted_cpu(self):
self.check_deleted(False)
@attr.gpu
def test_deleted_gpu(self):
self.check_deleted(True)
class TestEmbedID(unittest.TestCase, BaseTest):
def setUp(self):
self.lazy_init = False # For convenience.
self.bs, self.in_size, self.out_size = 5, 10, 20
self.x = numpy.arange(self.in_size, dtype=numpy.int32)
self.layer = L.EmbedID(self.in_size, self.out_size)
self.hook = WeightStandardization()
def test_add_ws_hook(self):
hook = WeightStandardization()
layer = self.layer
layer.add_hook(hook)
if self.lazy_init:
with chainer.using_config('train', False):
layer(self.x)
@testing.parameterize(*testing.product({
'lazy_init': [True, False],
}))
class TestLinear(unittest.TestCase, BaseTest):
def setUp(self):
self.bs, self.in_size, self.out_size = 10, 20, 30
self.x = numpy.random.normal(
size=(self.bs, self.in_size)).astype(numpy.float32)
self.layer = L.Linear(self.out_size) # Lazy initialization
in_size = None if self.lazy_init else self.in_size
self.layer = L.Linear(in_size, self.out_size)
self.hook = WeightStandardization()
@testing.parameterize(*testing.product({
'lazy_init': [True, False],
'link': [L.Convolution1D],
}))
class TestConvolution1D(unittest.TestCase, BaseTest):
def setUp(self):
self.in_channels, self.out_channels = 3, 10
in_channels = None if self.lazy_init else self.in_channels
conv_init_args = {'ksize': 3, 'stride': 1, 'pad': 1}
self.layer = self.link(
in_channels, self.out_channels, **conv_init_args)
self.x = numpy.random.normal(
size=(5, self.in_channels, 4)).astype(numpy.float32)
self.hook = WeightStandardization()
self.out_size = self.out_channels # For compatibility
@testing.parameterize(*testing.product({
'lazy_init': [True, False],
'link': [L.Convolution2D],
}))
class TestConvolution2D(unittest.TestCase, BaseTest):
def setUp(self):
self.in_channels, self.out_channels = 3, 10
in_channels = None if self.lazy_init else self.in_channels
conv_init_args = {'ksize': 3, 'stride': 1, 'pad': 1}
self.layer = self.link(
in_channels, self.out_channels, **conv_init_args)
self.x = numpy.random.normal(
size=(5, self.in_channels, 4, 4)).astype(numpy.float32)
self.hook = WeightStandardization()
self.out_size = self.out_channels # For compatibility
@testing.parameterize(*testing.product({
'lazy_init': [True, False],
'link': [L.Convolution3D],
}))
class TestConvolution3D(unittest.TestCase, BaseTest):
def setUp(self):
self.in_channels, self.out_channels = 3, 10
in_channels = None if self.lazy_init else self.in_channels
conv_init_args = {'ksize': 3, 'stride': 1, 'pad': 1}
self.layer = self.link(
in_channels, self.out_channels, **conv_init_args)
self.x = numpy.random.normal(
size=(5, self.in_channels, 4, 4, 4)).astype(numpy.float32)
self.hook = WeightStandardization()
self.out_size = self.out_channels # For compatibility
testing.run_module(__name__, __file__)
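# --- Illustrative sketch (not part of the original test file) ---
# Attaching the WeightStandardization hook to a convolution, mirroring the
# setups above: the filter weights are standardized on every forward pass.
import numpy
import chainer.links as L
from chainer.link_hooks.weight_standardization import WeightStandardization
conv = L.Convolution2D(3, 10, ksize=3, stride=1, pad=1)
conv.add_hook(WeightStandardization())
y = conv(numpy.random.normal(size=(5, 3, 4, 4)).astype(numpy.float32))
assert y.shape == (5, 10, 4, 4)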
| 5,969
| 31.445652
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/test_convert.py
|
import pickle
import sys
import unittest
import numpy
import pytest
from chainer import backend
from chainer.backends import cuda
from chainer import dataset
from chainer import testing
from chainer.testing import attr
import chainer.testing.backend # NOQA
import chainerx
_inject_backend_tests = testing.backend.inject_backend_tests(
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class ConverterTestBase(object):
def get_arrays_to_concat(self, backend_config):
return [
backend_config.get_array(numpy.random.rand(2, 3))
for _ in range(5)]
def check_concat_arrays(self, arrays, device, expected_device):
array = self.converter(arrays, device)
self.assertEqual(array.shape, (len(arrays),) + arrays[0].shape)
assert backend.get_device_from_array(array) == expected_device
np_array = backend.CpuDevice().send(array)
for x, y in zip(np_array, arrays):
numpy.testing.assert_array_equal(x, backend.CpuDevice().send(y))
def test_concat_arrays(self, backend_config):
arrays = self.get_arrays_to_concat(backend_config)
self.check_concat_arrays(arrays, None, backend_config.device)
def test_concat_arrays_pickle(self, backend_config):
converter = pickle.dumps(self.converter)
self.converter = pickle.loads(converter)
arrays = self.get_arrays_to_concat(backend_config)
self.check_concat_arrays(arrays, None, backend_config.device)
@attr.gpu
def test_concat_arrays_to_gpu(self, backend_config):
arrays = self.get_arrays_to_concat(backend_config)
self.check_concat_arrays(
arrays, 0, backend.GpuDevice.from_device_id(0))
@attr.chainerx
def test_concat_arrays_to_chainerx(self, backend_config):
device = chainerx.get_device('native:0')
arrays = self.get_arrays_to_concat(backend_config)
self.check_concat_arrays(
arrays, device, backend.ChainerxDevice(device))
def get_tuple_arrays_to_concat(self, backend_config):
return [
(backend_config.get_array(numpy.random.rand(2, 3)),
backend_config.get_array(numpy.random.rand(3, 4)))
for _ in range(5)]
def check_concat_tuples(self, tuples, device, expected_device):
arrays = self.converter(tuples, device)
self.assertEqual(len(arrays), len(tuples[0]))
for i in range(len(arrays)):
shape = (len(tuples),) + tuples[0][i].shape
self.assertEqual(arrays[i].shape, shape)
assert backend.get_device_from_array(arrays[i]) == expected_device
arr = backend.CpuDevice().send(arrays[i])
for x, y in zip(arr, tuples):
numpy.testing.assert_array_equal(
x, backend.CpuDevice().send(y[i]))
def test_concat_tuples(self, backend_config):
tuples = self.get_tuple_arrays_to_concat(backend_config)
self.check_concat_tuples(tuples, None, backend_config.device)
@attr.gpu
def test_concat_tuples_to_gpu(self, backend_config):
tuples = self.get_tuple_arrays_to_concat(backend_config)
self.check_concat_tuples(
tuples, 0, backend.GpuDevice.from_device_id(0))
@attr.chainerx
def test_concat_tuples_to_chainerx(self, backend_config):
device = chainerx.get_device('native:0')
arrays = self.get_tuple_arrays_to_concat(backend_config)
self.check_concat_tuples(
arrays, device, backend.ChainerxDevice(device))
def get_dict_arrays_to_concat(self, backend_config):
return [
{'x': backend_config.get_array(numpy.random.rand(2, 3)),
'y': backend_config.get_array(numpy.random.rand(3, 4))}
for _ in range(5)]
def check_concat_dicts(self, dicts, device, expected_device):
arrays = self.converter(dicts, device)
self.assertEqual(frozenset(arrays.keys()), frozenset(dicts[0].keys()))
for key in arrays:
shape = (len(dicts),) + dicts[0][key].shape
self.assertEqual(arrays[key].shape, shape)
self.assertEqual(
backend.get_device_from_array(arrays[key]), expected_device)
arr = backend.CpuDevice().send(arrays[key])
for x, y in zip(arr, dicts):
numpy.testing.assert_array_equal(
x, backend.CpuDevice().send(y[key]))
def test_concat_dicts(self, backend_config):
dicts = self.get_dict_arrays_to_concat(backend_config)
self.check_concat_dicts(dicts, None, backend_config.device)
@attr.gpu
def test_concat_dicts_to_gpu(self, backend_config):
dicts = self.get_dict_arrays_to_concat(backend_config)
self.check_concat_dicts(
dicts, 0, backend.GpuDevice.from_device_id(0))
@attr.chainerx
def test_concat_dicts_to_chainerx(self, backend_config):
device = chainerx.get_device('native:0')
arrays = self.get_dict_arrays_to_concat(backend_config)
self.check_concat_dicts(
arrays, device, backend.ChainerxDevice(device))
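# --- Illustrative sketch (not part of the original test file) ---
# dataset.concat_examples stacks per-sample arrays into one batch array and,
# with `padding`, pads ragged samples to a common shape, as the surrounding
# tests exercise.
import numpy
from chainer import dataset
samples = [numpy.random.rand(3, 4), numpy.random.rand(2, 5)]
stacked = dataset.concat_examples(samples, padding=0)
assert stacked.shape == (2, 3, 5)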
@_inject_backend_tests
class TestConcatExamples(ConverterTestBase, unittest.TestCase):
def setUp(self):
self.converter = dataset.concat_examples
class _XFailConcatWithAsyncTransfer(object):
@attr.chainerx
@pytest.mark.xfail(strict=True)
def test_concat_arrays_to_chainerx(self, *args, **kwargs):
(
super(_XFailConcatWithAsyncTransfer, self)
.test_concat_arrays_to_chainerx(*args, **kwargs)
)
@attr.chainerx
@pytest.mark.xfail(strict=True)
def test_concat_tuples_to_chainerx(self, *args, **kwargs):
(
super(_XFailConcatWithAsyncTransfer, self)
.test_concat_tuples_to_chainerx(*args, **kwargs)
)
@attr.chainerx
@pytest.mark.xfail(strict=True)
def test_concat_dicts_to_chainerx(self, *args, **kwargs):
(
super(_XFailConcatWithAsyncTransfer, self)
.test_concat_dicts_to_chainerx(*args, **kwargs)
)
@testing.backend.inject_backend_tests(
None,
[
# NumPy
{},
])
class TestConcatWithAsyncTransfer(
_XFailConcatWithAsyncTransfer,
ConverterTestBase, unittest.TestCase):
def setUp(self):
self.converter = chainer.dataset.ConcatWithAsyncTransfer()
@_inject_backend_tests
class TestConcatExamplesWithPadding(unittest.TestCase):
def test_concat_arrays_padding(self, backend_config):
arrays = backend_config.get_array(
[numpy.random.rand(3, 4),
numpy.random.rand(2, 5),
numpy.random.rand(4, 3)])
array = dataset.concat_examples(arrays, padding=0)
self.assertEqual(array.shape, (3, 4, 5))
self.assertEqual(type(array), type(arrays[0]))
arrays = [backend.CpuDevice().send(a) for a in arrays]
array = backend.CpuDevice().send(array)
numpy.testing.assert_array_equal(array[0, :3, :4], arrays[0])
numpy.testing.assert_array_equal(array[0, 3:, :], 0)
numpy.testing.assert_array_equal(array[0, :, 4:], 0)
numpy.testing.assert_array_equal(array[1, :2, :5], arrays[1])
numpy.testing.assert_array_equal(array[1, 2:, :], 0)
numpy.testing.assert_array_equal(array[2, :4, :3], arrays[2])
numpy.testing.assert_array_equal(array[2, :, 3:], 0)
def test_concat_tuples_padding(self, backend_config):
tuples = [
backend_config.get_array(
(numpy.random.rand(3, 4), numpy.random.rand(2, 5))),
backend_config.get_array(
(numpy.random.rand(4, 4), numpy.random.rand(3, 4))),
backend_config.get_array(
(numpy.random.rand(2, 5), numpy.random.rand(2, 6))),
]
arrays = dataset.concat_examples(tuples, padding=0)
self.assertEqual(len(arrays), 2)
self.assertEqual(arrays[0].shape, (3, 4, 5))
self.assertEqual(arrays[1].shape, (3, 3, 6))
self.assertEqual(type(arrays[0]), type(tuples[0][0]))
self.assertEqual(type(arrays[1]), type(tuples[0][1]))
for i in range(len(tuples)):
tuples[i] = (
backend.CpuDevice().send(tuples[i][0]),
backend.CpuDevice().send(tuples[i][1]))
arrays = tuple(backend.CpuDevice().send(array) for array in arrays)
numpy.testing.assert_array_equal(arrays[0][0, :3, :4], tuples[0][0])
numpy.testing.assert_array_equal(arrays[0][0, 3:, :], 0)
numpy.testing.assert_array_equal(arrays[0][0, :, 4:], 0)
numpy.testing.assert_array_equal(arrays[0][1, :4, :4], tuples[1][0])
numpy.testing.assert_array_equal(arrays[0][1, :, 4:], 0)
numpy.testing.assert_array_equal(arrays[0][2, :2, :5], tuples[2][0])
numpy.testing.assert_array_equal(arrays[0][2, 2:, :], 0)
numpy.testing.assert_array_equal(arrays[1][0, :2, :5], tuples[0][1])
numpy.testing.assert_array_equal(arrays[1][0, 2:, :], 0)
numpy.testing.assert_array_equal(arrays[1][0, :, 5:], 0)
numpy.testing.assert_array_equal(arrays[1][1, :3, :4], tuples[1][1])
numpy.testing.assert_array_equal(arrays[1][1, 3:, :], 0)
numpy.testing.assert_array_equal(arrays[1][1, :, 4:], 0)
numpy.testing.assert_array_equal(arrays[1][2, :2, :6], tuples[2][1])
numpy.testing.assert_array_equal(arrays[1][2, 2:, :], 0)
def test_concat_dicts_padding(self, backend_config):
dicts = [
{'x': numpy.random.rand(3, 4), 'y': numpy.random.rand(2, 5)},
{'x': numpy.random.rand(4, 4), 'y': numpy.random.rand(3, 4)},
{'x': numpy.random.rand(2, 5), 'y': numpy.random.rand(2, 6)},
]
dicts = [
{key: backend_config.get_array(arr) for key, arr in d.items()}
for d in dicts]
arrays = dataset.concat_examples(dicts, padding=0)
self.assertIn('x', arrays)
self.assertIn('y', arrays)
self.assertEqual(arrays['x'].shape, (3, 4, 5))
self.assertEqual(arrays['y'].shape, (3, 3, 6))
self.assertEqual(type(arrays['x']), type(dicts[0]['x']))
self.assertEqual(type(arrays['y']), type(dicts[0]['y']))
for d in dicts:
d['x'] = backend.CpuDevice().send(d['x'])
d['y'] = backend.CpuDevice().send(d['y'])
arrays = {
'x': backend.CpuDevice().send(arrays['x']),
'y': backend.CpuDevice().send(arrays['y'])}
numpy.testing.assert_array_equal(arrays['x'][0, :3, :4], dicts[0]['x'])
numpy.testing.assert_array_equal(arrays['x'][0, 3:, :], 0)
numpy.testing.assert_array_equal(arrays['x'][0, :, 4:], 0)
numpy.testing.assert_array_equal(arrays['x'][1, :4, :4], dicts[1]['x'])
numpy.testing.assert_array_equal(arrays['x'][1, :, 4:], 0)
numpy.testing.assert_array_equal(arrays['x'][2, :2, :5], dicts[2]['x'])
numpy.testing.assert_array_equal(arrays['x'][2, 2:, :], 0)
numpy.testing.assert_array_equal(arrays['y'][0, :2, :5], dicts[0]['y'])
numpy.testing.assert_array_equal(arrays['y'][0, 2:, :], 0)
numpy.testing.assert_array_equal(arrays['y'][0, :, 5:], 0)
numpy.testing.assert_array_equal(arrays['y'][1, :3, :4], dicts[1]['y'])
numpy.testing.assert_array_equal(arrays['y'][1, 3:, :], 0)
numpy.testing.assert_array_equal(arrays['y'][1, :, 4:], 0)
numpy.testing.assert_array_equal(arrays['y'][2, :2, :6], dicts[2]['y'])
numpy.testing.assert_array_equal(arrays['y'][2, 2:, :], 0)
@testing.parameterize(
{'padding': None},
{'padding': 0},
)
class TestConcatExamplesWithBuiltInTypes(unittest.TestCase):
int_arrays = [1, 2, 3]
float_arrays = [1.0, 2.0, 3.0]
def check_device(self, array, device, expected_device):
self.assertIsInstance(array, expected_device.xp.ndarray)
self.assertEqual(
backend.get_device_from_array(array), expected_device)
def check_concat_arrays(
self, arrays, device, expected_device, expected_dtype):
array = dataset.concat_examples(arrays, device, self.padding)
self.assertEqual(array.shape, (len(arrays),))
self.check_device(array, device, expected_device)
np_array = backend.CpuDevice().send(array)
for x, y in zip(np_array, arrays):
assert x.dtype == expected_dtype
numpy.testing.assert_array_equal(
x, numpy.array(y, dtype=expected_dtype))
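# Python ints are concatenated with NumPy's platform-dependent default
# integer dtype (int32 on Windows, int64 on most other platforms), while
# Python floats become float64; the expectations below account for this.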
def test_concat_arrays_to_cpu(self):
if sys.platform == 'win32':
expected_int_dtype = numpy.int32
else:
expected_int_dtype = numpy.int64
for device in (-1, None):
self.check_concat_arrays(
self.int_arrays,
device,
backend.CpuDevice(),
expected_int_dtype)
self.check_concat_arrays(
self.float_arrays,
device,
backend.CpuDevice(),
numpy.float64)
@attr.gpu
def test_concat_arrays_to_gpu(self):
device = 0
if sys.platform == 'win32':
expected_int_dtype = numpy.int32
else:
expected_int_dtype = numpy.int64
self.check_concat_arrays(
self.int_arrays,
device,
backend.GpuDevice.from_device_id(0),
expected_int_dtype)
self.check_concat_arrays(
self.float_arrays,
device,
backend.GpuDevice.from_device_id(0),
numpy.float64)
@attr.chainerx
def test_concat_arrays_to_chainerx(self):
device = 'native:0'
self.check_concat_arrays(
self.int_arrays,
device,
backend.ChainerxDevice(chainerx.get_device(device)),
numpy.int64)
self.check_concat_arrays(
self.float_arrays,
device,
backend.ChainerxDevice(chainerx.get_device(device)),
numpy.float64)
def get_xp(gpu):
if gpu:
return cuda.cupy
else:
return numpy
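# The to_device tests below exercise the device-argument semantics of
# dataset.to_device: None leaves the array on its current device, -1 moves
# it to the CPU, and a non-negative integer moves it to that GPU.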
@testing.parameterize(
{'device': None, 'src_gpu': False, 'dst_gpu': False},
{'device': -1, 'src_gpu': False, 'dst_gpu': False},
)
class TestToDeviceCPU(unittest.TestCase):
def test_to_device(self):
src_xp = get_xp(self.src_gpu)
dst_xp = get_xp(self.dst_gpu)
x = src_xp.array([1], 'i')
y = dataset.to_device(self.device, x)
self.assertIsInstance(y, dst_xp.ndarray)
@testing.parameterize(
{'device': None, 'src_gpu': True, 'dst_gpu': True},
{'device': -1, 'src_gpu': True, 'dst_gpu': False},
{'device': 0, 'src_gpu': False, 'dst_gpu': True},
{'device': 0, 'src_gpu': True, 'dst_gpu': True},
)
class TestToDeviceGPU(unittest.TestCase):
@attr.gpu
def test_to_device(self):
src_xp = get_xp(self.src_gpu)
dst_xp = get_xp(self.dst_gpu)
x = src_xp.array([1], 'i')
y = dataset.to_device(self.device, x)
self.assertIsInstance(y, dst_xp.ndarray)
if self.device is not None and self.device >= 0:
self.assertEqual(int(y.device), self.device)
if self.device is None and self.src_gpu:
self.assertEqual(int(x.device), int(y.device))
@testing.parameterize(
{'device': 1, 'src_gpu': False, 'dst_gpu': True},
{'device': 1, 'src_gpu': True, 'dst_gpu': True},
)
class TestToDeviceMultiGPU(unittest.TestCase):
@attr.multi_gpu(2)
def test_to_device(self):
src_xp = get_xp(self.src_gpu)
dst_xp = get_xp(self.dst_gpu)
x = src_xp.array([1], 'i')
y = dataset.to_device(self.device, x)
self.assertIsInstance(y, dst_xp.ndarray)
self.assertEqual(int(y.device), self.device)
testing.run_module(__name__, __file__)
| 16,187
| 36.299539
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/test_dataset_mixin.py
|
import unittest
import numpy
from chainer import dataset
from chainer import testing
class SimpleDataset(dataset.DatasetMixin):
def __init__(self, values):
self.values = values
def __len__(self):
return len(self.values)
def get_example(self, i):
return self.values[i]
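# DatasetMixin only requires __len__ and get_example; integer, slice, list
# and ndarray indexing (exercised below) is provided by the mixin's
# __getitem__. A minimal illustrative sketch:
#
#   ds = SimpleDataset([10, 20, 30])
#   ds[1]        # -> 20
#   ds[::-1]     # -> [30, 20, 10]
#   ds[[0, 2]]   # -> [10, 30]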
class TestDatasetMixin(unittest.TestCase):
def setUp(self):
self.ds = SimpleDataset([1, 2, 3, 4, 5])
def test_getitem(self):
for i in range(len(self.ds.values)):
self.assertEqual(self.ds[i], self.ds.values[i])
def test_slice(self):
ds = self.ds
self.assertEqual(ds[:], ds.values)
self.assertEqual(ds[1:], ds.values[1:])
self.assertEqual(ds[2:], ds.values[2:])
self.assertEqual(ds[1:4], ds.values[1:4])
self.assertEqual(ds[0:4], ds.values[0:4])
self.assertEqual(ds[1:5], ds.values[1:5])
self.assertEqual(ds[:-1], ds.values[:-1])
self.assertEqual(ds[1:-2], ds.values[1:-2])
self.assertEqual(ds[-4:-1], ds.values[-4:-1])
self.assertEqual(ds[::-1], ds.values[::-1])
self.assertEqual(ds[4::-1], ds.values[4::-1])
self.assertEqual(ds[:2:-1], ds.values[:2:-1])
self.assertEqual(ds[-1::-1], ds.values[-1::-1])
self.assertEqual(ds[:-3:-1], ds.values[:-3:-1])
self.assertEqual(ds[-1:-3:-1], ds.values[-1:-3:-1])
self.assertEqual(ds[4:1:-1], ds.values[4:1:-1])
self.assertEqual(ds[-1:1:-1], ds.values[-1:1:-1])
self.assertEqual(ds[4:-3:-1], ds.values[4:-3:-1])
self.assertEqual(ds[-2:-4:-1], ds.values[-2:-4:-1])
self.assertEqual(ds[::2], ds.values[::2])
self.assertEqual(ds[1::2], ds.values[1::2])
self.assertEqual(ds[:3:2], ds.values[:3:2])
self.assertEqual(ds[1:4:2], ds.values[1:4:2])
self.assertEqual(ds[::-2], ds.values[::-2])
self.assertEqual(ds[:10], ds.values[:10])
def test_advanced_indexing(self):
ds = self.ds
self.assertEqual(ds[[1, 2]], [ds.values[1], ds.values[2]])
self.assertEqual(ds[[1, 2]], ds[1:3])
self.assertEqual(ds[[4, 0]], [ds.values[4], ds.values[0]])
self.assertEqual(ds[[4]], [ds.values[4]])
self.assertEqual(ds[[4, 1, 3, 2, 2, 1]],
[ds.values[4], ds.values[1], ds.values[3],
ds.values[2], ds.values[2], ds.values[1]])
self.assertEqual(ds[[-2, -1]], [ds.values[-2], ds.values[-1]])
# test ndarray
self.assertEqual(ds[numpy.asarray([1, 2, 3])], ds[1:4])
def test_large_dataset(self):
# Check performance of __getitem__ with a large dataset
ds = SimpleDataset(list(numpy.arange(1000000)))
self.assertEqual(ds[3453], ds.values[3453])
self.assertEqual(ds[:], ds.values)
self.assertEqual(ds[2:987654:7], ds.values[2:987654:7])
self.assertEqual(ds[::-3], ds.values[::-3])
for i in range(100):
self.assertEqual(ds[i * 4096:(i + 1) * 4096],
ds.values[i * 4096:(i + 1) * 4096])
testing.run_module(__name__, __file__)
| 3,128
| 36.25
| 70
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/test_download.py
|
import os
import shutil
import tempfile
import unittest
import mock
from chainer import dataset
from chainer import testing
class TestGetSetDatasetRoot(unittest.TestCase):
def test_set_dataset_root(self):
orig_root = dataset.get_dataset_root()
new_root = '/tmp/dataset_root'
try:
dataset.set_dataset_root(new_root)
self.assertEqual(dataset.get_dataset_root(), new_root)
finally:
dataset.set_dataset_root(orig_root)
class TestGetDatasetDirectory(unittest.TestCase):
def test_get_dataset_directory(self):
root = dataset.get_dataset_root()
path = dataset.get_dataset_directory('test', False)
self.assertEqual(path, os.path.join(root, 'test'))
class TestCacheOrLoadFile(unittest.TestCase):
def setUp(self):
self.default_dataset_root = dataset.get_dataset_root()
self.temp_dir = tempfile.mkdtemp()
dataset.set_dataset_root(self.temp_dir)
def tearDown(self):
dataset.set_dataset_root(self.default_dataset_root)
shutil.rmtree(self.temp_dir)
def test_cache_exists(self):
creator = mock.Mock()
loader = mock.Mock()
file_desc, file_name = tempfile.mkstemp()
try:
dataset.cache_or_load_file(file_name, creator, loader)
finally:
os.close(file_desc)
os.remove(file_name)
self.assertFalse(creator.called)
loader.assert_called_once_with(file_name)
def test_new_file(self):
def create(path):
with open(path, 'w') as f:
f.write('test')
creator = mock.Mock()
creator.side_effect = create
loader = mock.Mock()
dir_path = tempfile.mkdtemp()
# This file never exists because the directory is newly created.
path = os.path.join(dir_path, 'cache')
try:
dataset.cache_or_load_file(path, creator, loader)
self.assertEqual(creator.call_count, 1)
self.assertFalse(loader.called)
self.assertTrue(os.path.exists(path))
with open(path) as f:
self.assertEqual(f.read(), 'test')
finally:
shutil.rmtree(dir_path)
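# The two tests above cover both branches of dataset.cache_or_load_file:
# when the cache file already exists only the loader is invoked, and when it
# does not exist the creator builds it and the result is stored at the given
# path.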
class TestCacheOrLoadFileFileExists(unittest.TestCase):
def setUp(self):
self.default_dataset_root = dataset.get_dataset_root()
self.temp_file_desc, self.temp_file_name = tempfile.mkstemp()
dataset.set_dataset_root(self.temp_file_name)
self.dir_path = tempfile.mkdtemp()
def tearDown(self):
dataset.set_dataset_root(self.default_dataset_root)
os.close(self.temp_file_desc)
os.remove(self.temp_file_name)
shutil.rmtree(self.dir_path)
def test_file_exists(self):
creator = mock.Mock()
loader = mock.Mock()
# This file never exists because the directory is newly created.
path = os.path.join(self.dir_path, 'cache')
with self.assertRaises(RuntimeError):
dataset.cache_or_load_file(path, creator, loader)
class TestCachedDownload(unittest.TestCase):
def setUp(self):
self.default_dataset_root = dataset.get_dataset_root()
self.temp_dir = tempfile.mkdtemp()
dataset.set_dataset_root(self.temp_dir)
def tearDown(self):
dataset.set_dataset_root(self.default_dataset_root)
shutil.rmtree(self.temp_dir)
def test_fail_to_make_dir(self):
with mock.patch('os.makedirs') as f:
f.side_effect = OSError()
with self.assertRaises(OSError):
dataset.cached_download('https://example.com')
def test_file_exists(self):
# Make an empty file which has the same name as the cache directory
with open(os.path.join(self.temp_dir, '_dl_cache'), 'w'):
pass
with self.assertRaises(OSError):
dataset.cached_download('https://example.com')
def test_cached_download(self):
with mock.patch('six.moves.urllib.request.urlretrieve') as f:
def download(url, path):
with open(path, 'w') as f:
f.write('test')
f.side_effect = download
cache_path = dataset.cached_download('https://example.com')
self.assertEqual(f.call_count, 1)
args, kwargs = f.call_args
self.assertEqual(kwargs, {})
self.assertEqual(len(args), 2)
# The second argument is a temporary path, and it is removed
self.assertEqual(args[0], 'https://example.com')
self.assertTrue(os.path.exists(cache_path))
with open(cache_path) as f:
stored_data = f.read()
self.assertEqual(stored_data, 'test')
testing.run_module(__name__, __file__)
| 4,757
| 29.305732
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/tabular_tests/test_join.py
|
import unittest
import numpy as np
import six
import chainer
from chainer import testing
from chainer_tests.dataset_tests.tabular_tests import dummy_dataset
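# Filter out parameter combinations whose key_indices point past the keys of
# the joined dataset (dataset_a contributes three keys, or one when its mode
# is None; dataset_b contributes two, or one when its mode is None).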
def _filter_params(params):
for param in params:
key_size = 0
key_size += 3 if param['mode_a'] else 1
key_size += 2 if param['mode_b'] else 1
if param['key_indices'] and \
any(key_size <= key_index for key_index in param['key_indices']):
continue
yield param
@testing.parameterize(*_filter_params(testing.product({
'mode_a': [tuple, dict, None],
'mode_b': [tuple, dict, None],
'return_array': [True, False],
'key_indices': [None, (0, 4, 1), (0, 2), (1, 0), ()],
})))
class TestJoin(unittest.TestCase):
def setUp(self):
if self.key_indices is None:
self.expected_key_indices_a = None
self.expected_key_indices_b = None
return
key_size_a = 3 if self.mode_a else 1
key_indices_a = tuple(
key_index
for key_index in self.key_indices
if key_index < key_size_a)
key_indices_b = tuple(
key_index - key_size_a
for key_index in self.key_indices
if key_size_a <= key_index)
if key_indices_a:
self.expected_key_indices_a = key_indices_a
if key_indices_b:
self.expected_key_indices_b = key_indices_b
def test_join(self):
def callback_a(indices, key_indices):
self.assertIsNone(indices)
self.assertEqual(key_indices, self.expected_key_indices_a)
dataset_a = dummy_dataset.DummyDataset(
mode=self.mode_a,
return_array=self.return_array, callback=callback_a,
convert=True)
def callback_b(indices, key_indices):
self.assertIsNone(indices)
self.assertEqual(key_indices, self.expected_key_indices_b)
dataset_b = dummy_dataset.DummyDataset(
keys=('d', 'e'), mode=self.mode_b,
return_array=self.return_array, callback=callback_b)
view = dataset_a.join(dataset_b)
self.assertIsInstance(view, chainer.dataset.TabularDataset)
self.assertEqual(len(view), len(dataset_a))
self.assertEqual(view.keys, dataset_a.keys + dataset_b.keys)
self.assertEqual(view.mode, dataset_a.mode or dataset_b.mode or tuple)
output = view.get_examples(None, self.key_indices)
data = np.vstack((dataset_a.data, dataset_b.data))
if self.key_indices is not None:
data = data[list(self.key_indices)]
for out, d in six.moves.zip_longest(output, data):
np.testing.assert_equal(out, d)
if self.return_array:
self.assertIsInstance(out, np.ndarray)
else:
self.assertIsInstance(out, list)
self.assertEqual(view.convert(output), 'converted')
class TestJoinInvalid(unittest.TestCase):
def test_join_length(self):
dataset_a = dummy_dataset.DummyDataset()
dataset_b = dummy_dataset.DummyDataset(size=5, keys=('d', 'e'))
with self.assertRaises(ValueError):
dataset_a.join(dataset_b)
def test_join_conflict_key(self):
dataset_a = dummy_dataset.DummyDataset()
dataset_b = dummy_dataset.DummyDataset(keys=('a', 'd'))
with self.assertRaises(ValueError):
dataset_a.join(dataset_b)
testing.run_module(__name__, __file__)
| 3,492
| 30.1875
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/tabular_tests/test_tabular_dataset.py
|
import unittest
import numpy as np
from chainer import testing
from chainer_tests.dataset_tests.tabular_tests import dummy_dataset
@testing.parameterize(*testing.product({
'mode': [tuple, dict, None],
'return_array': [True, False],
}))
class TestTabularDataset(unittest.TestCase):
def test_fetch(self):
def callback(indices, key_indices):
self.assertIsNone(indices)
self.assertIsNone(key_indices)
dataset = dummy_dataset.DummyDataset(
mode=self.mode, return_array=self.return_array, callback=callback)
output = dataset.fetch()
if self.mode is tuple:
expected = tuple(dataset.data)
elif self.mode is dict:
expected = dict(zip(('a', 'b', 'c'), dataset.data))
elif self.mode is None:
expected = dataset.data[0]
np.testing.assert_equal(output, expected)
if self.mode is dict:
output = output.values()
elif self.mode is None:
output = output,
for out in output:
if self.return_array:
self.assertIsInstance(out, np.ndarray)
else:
self.assertIsInstance(out, list)
def test_convert(self):
dataset = dummy_dataset.DummyDataset(
mode=self.mode, return_array=self.return_array)
output = dataset.convert(dataset.fetch())
if self.mode is tuple:
expected = tuple(dataset.data)
elif self.mode is dict:
expected = dict(zip(('a', 'b', 'c'), dataset.data))
elif self.mode is None:
expected = dataset.data[0]
np.testing.assert_equal(output, expected)
if self.mode is dict:
output = output.values()
elif self.mode is None:
output = output,
for out in output:
self.assertIsInstance(out, np.ndarray)
def test_get_example(self):
def callback(indices, key_indices):
self.assertEqual(indices, [3])
self.assertIsNone(key_indices)
dataset = dummy_dataset.DummyDataset(
mode=self.mode, return_array=self.return_array, callback=callback)
if self.mode is tuple:
expected = tuple(dataset.data[:, 3])
elif self.mode is dict:
expected = dict(zip(('a', 'b', 'c'), dataset.data[:, 3]))
elif self.mode is None:
expected = dataset.data[0, 3]
self.assertEqual(dataset.get_example(3), expected)
def test_iter(self):
dataset = dummy_dataset.DummyDataset(
mode=self.mode, return_array=self.return_array)
it = iter(dataset)
for i in range(10):
if self.mode is tuple:
expected = tuple(dataset.data[:, i])
elif self.mode is dict:
expected = dict(zip(('a', 'b', 'c'), dataset.data[:, i]))
elif self.mode is None:
expected = dataset.data[0, i]
self.assertEqual(next(it), expected)
with self.assertRaises(StopIteration):
next(it)
testing.run_module(__name__, __file__)
| 3,126
| 30.908163
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/tabular_tests/test_slice.py
|
import unittest
import warnings
import numpy as np
import six
import chainer
from chainer import testing
from chainer_tests.dataset_tests.tabular_tests import dummy_dataset
def _filter_params(params):
for param in params:
if 'expected_len' in param and \
isinstance(param['get_examples_indices'], list) and \
any(param['expected_len'] <= index
for index in param['get_examples_indices']):
continue
if 'expected_keys' in param and \
isinstance(param['get_examples_key_indices'], tuple) and \
any(len(param['expected_keys']) <= key_index
for key_index in param['get_examples_key_indices']):
continue
# To reduce the number of tests,
# drop combinations of indices and keys.
# (check only `slice[indices]` and `slice[:, keys]`)
if not (param['indices'] == slice(None) and
param['get_examples_indices'] is None) and \
not (param['keys'] is None and
param['get_examples_key_indices'] is None):
continue
yield param
@testing.parameterize(*_filter_params(testing.product_dict(
testing.product_dict(
[{'mode': tuple}, {'mode': dict}],
[
{'keys': None, 'expected_keys': ('a', 'b', 'c')},
{'keys': 1, 'expected_keys': ('b',)},
{'keys': (1,), 'expected_keys': ('b',)},
{'keys': 3, 'key_exception': IndexError},
{'keys': (3,), 'key_exception': IndexError},
{'keys': 'c', 'expected_keys': ('c',)},
{'keys': ('c',), 'expected_keys': ('c',)},
{'keys': 'd', 'key_exception': KeyError},
{'keys': ('d',), 'key_exception': KeyError},
{'keys': (-1, 'a'), 'expected_keys': ('c', 'a')},
{'keys': (), 'expected_keys': ()},
],
) +
testing.product_dict(
[{'mode': None}],
[
{'keys': None, 'expected_keys': ('a',)},
{'keys': 0, 'expected_keys': ('a',)},
{'keys': (0,), 'expected_keys': ('a',)},
{'keys': 1, 'key_exception': IndexError},
{'keys': (1,), 'key_exception': IndexError},
{'keys': 'a', 'expected_keys': ('a',)},
{'keys': ('a',), 'expected_keys': ('a',)},
{'keys': 'b', 'key_exception': KeyError},
{'keys': ('b',), 'key_exception': KeyError},
{'keys': (), 'expected_keys': ()},
],
),
testing.product({
'return_array': [True, False],
'integer': [int, np.int32],
}),
[
{'indices': slice(None), 'expected_len': 10},
{'indices': [3, -2], 'expected_len': 2},
{'indices': [11, 1], 'index_exception': IndexError},
{'indices': [i in {1, 3} for i in range(10)], 'expected_len': 2},
{'indices': [True] * 11, 'index_exception': ValueError},
{'indices': slice(3, None, -2), 'expected_len': 2},
{'indices': [False, 3, 9, 5, True], 'expected_len': 5},
{'indices': [], 'expected_len': 0},
],
testing.product({
'get_examples_indices': [
None, [1], [1, 0], slice(0, 2, 1), slice(1, None, -1), []],
'get_examples_key_indices': [None, (1,), (1, 0), ()],
}),
)))
class TestSlice(unittest.TestCase):
def setUp(self):
self.exception = getattr(self, 'index_exception', None) \
or getattr(self, 'key_exception', None)
if isinstance(self.indices, list):
self.indices = [
index if isinstance(index, bool) else self.integer(index)
for index in self.indices]
def test_slice(self):
def callback(indices, key_indices):
if isinstance(self.indices, list) \
or isinstance(self.get_examples_indices, list):
self.assertIsInstance(indices, list)
elif isinstance(self.indices, slice) \
or isinstance(self.get_examples_indices, slice):
self.assertIsInstance(indices, slice)
else:
self.assertIsNone(indices)
if self.keys is None and self.get_examples_key_indices is None:
self.assertIsNone(key_indices)
else:
self.assertIsInstance(key_indices, tuple)
dataset = dummy_dataset.DummyDataset(
mode=self.mode, return_array=self.return_array, callback=callback,
convert=True)
if self.exception is not None:
with self.assertRaises(self.exception):
if self.keys is None:
dataset.slice[self.indices]
else:
dataset.slice[self.indices, self.keys]
return
if self.keys is None:
view = dataset.slice[self.indices]
data = dataset.data[:, _indices_for_numpy(self.indices)]
else:
view = dataset.slice[self.indices, self.keys]
if isinstance(self.keys, tuple):
keys = self.keys
else:
keys = self.keys,
key_indices = [
{'a': 0, 'b': 1, 'c': 2}.get(key, key) for key in keys]
data = dataset.data[key_indices][
:, _indices_for_numpy(self.indices)]
self.assertIsInstance(view, chainer.dataset.TabularDataset)
self.assertEqual(len(view), self.expected_len)
self.assertEqual(view.keys, self.expected_keys)
if self.keys is None:
self.assertEqual(view.mode, self.mode)
elif isinstance(self.keys, tuple):
self.assertEqual(view.mode, self.mode or tuple)
else:
self.assertEqual(view.mode, None)
output = view.get_examples(
self.get_examples_indices, self.get_examples_key_indices)
if self.get_examples_indices is not None:
data = data[:, _indices_for_numpy(self.get_examples_indices)]
if self.get_examples_key_indices is not None:
data = data[list(self.get_examples_key_indices)]
for out, d in six.moves.zip_longest(output, data):
np.testing.assert_equal(out, d)
if self.return_array:
self.assertIsInstance(out, np.ndarray)
else:
self.assertIsInstance(out, list)
self.assertEqual(view.convert(output), 'converted')
# Replace list of bool with ndarray of bool
# since old numpy cannot handle list of bool.
def _indices_for_numpy(indices):
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
if len(np.empty(2)[[False, True]]) == 1:
# new numpy
return indices
# old numpy
if isinstance(indices, list) and \
len(indices) > 0 and \
isinstance(indices[0], bool):
return np.array(indices)
else:
return indices
testing.run_module(__name__, __file__)
| 6,953
| 35.6
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/tabular_tests/test_concat.py
|
import operator
import unittest
import numpy as np
import six
import chainer
from chainer import testing
from chainer_tests.dataset_tests.tabular_tests import dummy_dataset
@testing.parameterize(*testing.product_dict(
testing.product({
'mode_a': [tuple, dict, None],
'mode_b': [tuple, dict, None],
'return_array': [True, False],
}),
[
{'indices': None,
'expected_indices_a': None,
'expected_indices_b': None},
{'indices': [3, 1, 4, 12, 14, 13, 7, 5],
'expected_indices_a': [3, 1, 4, 7, 5],
'expected_indices_b': [2, 4, 3]},
{'indices': [3, 1, 4],
'expected_indices_a': [3, 1, 4]},
{'indices': slice(13, 6, -2),
'expected_indices_a': slice(9, 6, -2),
'expected_indices_b': slice(3, None, -2)},
{'indices': slice(9, None, -2),
'expected_indices_a': slice(9, None, -2)},
{'indices': [1, 2, 1],
'expected_indices_a': [1, 2, 1]},
{'indices': []},
],
))
class TestConcat(unittest.TestCase):
def test_concat(self):
def callback_a(indices, key_indices):
self.assertEqual(indices, self.expected_indices_a)
self.assertIsNone(key_indices)
dataset_a = dummy_dataset.DummyDataset(
keys=('a', 'b', 'c') if self.mode_b else ('a',),
mode=self.mode_a,
return_array=self.return_array, callback=callback_a,
convert=True)
def callback_b(indices, key_indices):
self.assertEqual(indices, self.expected_indices_b)
self.assertIsNone(key_indices)
dataset_b = dummy_dataset.DummyDataset(
size=5,
keys=('a', 'b', 'c') if self.mode_a else ('a',),
mode=self.mode_b,
return_array=self.return_array, callback=callback_b)
view = dataset_a.concat(dataset_b)
self.assertIsInstance(view, chainer.dataset.TabularDataset)
self.assertEqual(len(view), len(dataset_a) + len(dataset_b))
self.assertEqual(view.keys, dataset_a.keys)
self.assertEqual(view.mode, dataset_a.mode)
output = view.get_examples(self.indices, None)
data = np.hstack((dataset_a.data, dataset_b.data))
if self.indices is not None:
data = data[:, self.indices]
for out, d in six.moves.zip_longest(output, data):
np.testing.assert_equal(out, d)
if self.return_array and operator.xor(
hasattr(self, 'expected_indices_a'),
hasattr(self, 'expected_indices_b')):
self.assertIsInstance(out, np.ndarray)
else:
self.assertIsInstance(out, list)
self.assertEqual(view.convert(output), 'converted')
class TestConcatInvalid(unittest.TestCase):
def test_concat_key_length(self):
dataset_a = dummy_dataset.DummyDataset()
dataset_b = dummy_dataset.DummyDataset(keys=('a', 'b'))
with self.assertRaises(ValueError):
dataset_a.concat(dataset_b)
def test_concat_key_order(self):
dataset_a = dummy_dataset.DummyDataset()
dataset_b = dummy_dataset.DummyDataset(keys=('b', 'a', 'c'))
with self.assertRaises(ValueError):
dataset_a.concat(dataset_b)
testing.run_module(__name__, __file__)
| 3,352
| 31.872549
| 68
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/tabular_tests/test_transform.py
|
import unittest
import numpy as np
import six
import chainer
from chainer import testing
from chainer_tests.dataset_tests.tabular_tests import dummy_dataset
# filter out invalid combinations of params
def _filter_params(params):
for param in params:
if param['out_mode'] is None and \
isinstance(param['key_indices'], tuple) and \
any(1 <= key_index
for key_index in param['key_indices']):
continue
yield param
@testing.parameterize(*_filter_params(testing.product({
'in_mode': [tuple, dict, None],
'out_mode': [tuple, dict, None],
'indices': [None, [1, 3], slice(None, 2)],
'key_indices': [None, (0,), (1, 0)],
'with_batch': [False, True],
})))
class TestTransform(unittest.TestCase):
def test_transform(self):
dataset = dummy_dataset.DummyDataset(
mode=self.in_mode, return_array=True, convert=True)
def transform(*args, **kwargs):
if self.in_mode is tuple:
self.assertEqual(len(args), 3)
self.assertEqual(len(kwargs), 0)
a, b, c = args
elif self.in_mode is dict:
self.assertEqual(len(args), 0)
self.assertEqual(len(kwargs), 3)
a, b, c = kwargs['a'], kwargs['b'], kwargs['c']
elif self.in_mode is None:
self.assertEqual(len(args), 1)
self.assertEqual(len(kwargs), 0)
a, = args
b, c = a, a
if self.with_batch:
self.assertIsInstance(a, np.ndarray)
self.assertIsInstance(b, np.ndarray)
self.assertIsInstance(c, np.ndarray)
else:
self.assertIsInstance(a, float)
self.assertIsInstance(b, float)
self.assertIsInstance(c, float)
if self.out_mode is tuple:
return a + b, b + c
elif self.out_mode is dict:
return {'alpha': a + b, 'beta': b + c}
elif self.out_mode is None:
return a + b + c
if self.in_mode is not None:
a, b, c = dataset.data
else:
a, = dataset.data
b, c = a, a
if self.out_mode is not None:
if self.with_batch:
view = dataset.transform_batch(('alpha', 'beta'), transform)
else:
view = dataset.transform(('alpha', 'beta'), transform)
data = np.vstack((a + b, b + c))
else:
if self.with_batch:
view = dataset.transform_batch('alpha', transform)
else:
view = dataset.transform('alpha', transform)
data = (a + b + c)[None]
self.assertIsInstance(view, chainer.dataset.TabularDataset)
self.assertEqual(len(view), len(dataset))
if self.out_mode is not None:
self.assertEqual(view.keys, ('alpha', 'beta'))
self.assertEqual(view.mode, self.out_mode)
else:
self.assertEqual(view.keys, ('alpha',))
self.assertEqual(view.mode, self.out_mode)
output = view.get_examples(self.indices, self.key_indices)
if self.indices is not None:
data = data[:, self.indices]
if self.key_indices is not None:
data = data[list(self.key_indices)]
for out, d in six.moves.zip_longest(output, data):
np.testing.assert_equal(out, d)
if self.with_batch:
self.assertIsInstance(out, np.ndarray)
else:
self.assertIsInstance(out, list)
self.assertEqual(view.convert(view.fetch()), 'converted')
@testing.parameterize(
{'mode': tuple},
{'mode': dict},
{'mode': None},
)
class TestTransformInvalid(unittest.TestCase):
def setUp(self):
self.count = 0
def _transform(self, a, b, c):
self.count += 1
if self.count % 2 == 0:
mode = self.mode
else:
if self.mode is tuple:
mode = dict
elif self.mode is dict:
mode = None
elif self.mode is None:
mode = tuple
if mode is tuple:
return a,
elif mode is dict:
return {'a': a}
elif mode is None:
return a
def test_transform_inconsistent_mode(self):
dataset = dummy_dataset.DummyDataset()
view = dataset.transform(('a',), self._transform)
view.get_examples([0], None)
with self.assertRaises(ValueError):
view.get_examples([0], None)
def test_transform_batch_inconsistent_mode(self):
dataset = dummy_dataset.DummyDataset()
view = dataset.transform_batch(('a',), self._transform)
view.get_examples(None, None)
with self.assertRaises(ValueError):
view.get_examples(None, None)
def test_transform_batch_length_changed(self):
dataset = dummy_dataset.DummyDataset()
def transform_batch(a, b, c):
if self.mode is tuple:
return a + [0],
elif self.mode is dict:
return {'a': a + [0]}
elif self.mode is None:
return a + [0]
view = dataset.transform_batch(('a',), transform_batch)
with self.assertRaises(ValueError):
view.get_examples(None, None)
testing.run_module(__name__, __file__)
| 5,484
| 30.705202
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/tabular_tests/dummy_dataset.py
|
import numpy as np
import chainer
from chainer import testing
class DummyDataset(chainer.dataset.TabularDataset):
def __init__(
self, size=10, keys=('a', 'b', 'c'), mode=tuple,
return_array=False, callback=None, convert=False):
if mode is None:
keys = keys[0],
self._keys = keys
self._mode = mode
self._return_array = return_array
self._callback = callback
self._convert = convert
self.data = np.random.uniform(size=(len(keys), size))
def __len__(self):
return self.data.shape[1]
@property
def keys(self):
return self._keys
@property
def mode(self):
return self._mode
def get_examples(self, indices, key_indices):
if self._callback:
self._callback(indices, key_indices)
data = self.data
if indices is not None:
data = data[:, indices]
if key_indices is not None:
data = data[list(key_indices)]
if self._return_array:
return tuple(data)
else:
return tuple(list(d) for d in data)
def convert(self, data):
if self._convert:
return 'converted'
else:
return super(DummyDataset, self).convert(data)
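# DummyDataset backs the tabular-dataset tests with a (len(keys), size)
# array of random values. get_examples returns one sequence per requested
# key, optionally restricted to the given indices, and the callback lets a
# test assert on exactly which indices/key_indices reach the underlying
# dataset. A minimal illustrative sketch:
#
#   ds = DummyDataset(size=4, keys=('a', 'b'), mode=tuple)
#   a_col, b_col = ds.get_examples([0, 2], None)  # two values per key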
# See tests/chainer_tests/test_runnable.py
testing.run_module(__name__, __file__)
| 1,382
| 22.844828
| 62
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/tabular_tests/test_with_converter.py
|
import unittest
import numpy as np
import chainer
from chainer import testing
from chainer_tests.dataset_tests.tabular_tests import dummy_dataset
@testing.parameterize(
{'mode': tuple},
{'mode': dict},
{'mode': None},
)
class TestWithConverter(unittest.TestCase):
def test_with_converter(self):
dataset = dummy_dataset.DummyDataset(mode=self.mode)
def converter(*args, **kwargs):
if self.mode is tuple:
np.testing.assert_equal(args, tuple(dataset.data))
self.assertEqual(kwargs, {})
elif self.mode is dict:
self.assertEqual(args, ())
np.testing.assert_equal(
kwargs, dict(zip(('a', 'b', 'c'), dataset.data)))
elif self.mode is None:
np.testing.assert_equal(args, tuple(dataset.data))
self.assertEqual(kwargs, {})
return 'converted'
view = dataset.with_converter(converter)
self.assertIsInstance(view, chainer.dataset.TabularDataset)
self.assertEqual(len(view), len(dataset))
self.assertEqual(view.keys, dataset.keys)
self.assertEqual(view.mode, dataset.mode)
self.assertEqual(
view.get_examples(None, None), dataset.get_examples(None, None))
self.assertEqual(view.convert(view.fetch()), 'converted')
testing.run_module(__name__, __file__)
| 1,413
| 30.422222
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/tabular_tests/test_asmode.py
|
import unittest
import chainer
from chainer import testing
from chainer_tests.dataset_tests.tabular_tests import dummy_dataset
@testing.parameterize(
{'mode': tuple},
{'mode': dict},
{'mode': None},
)
class TestAstuple(unittest.TestCase):
def test_astuple(self):
dataset = dummy_dataset.DummyDataset(mode=self.mode, convert=True)
view = dataset.astuple()
self.assertIsInstance(view, chainer.dataset.TabularDataset)
self.assertEqual(len(view), len(dataset))
self.assertEqual(view.keys, dataset.keys)
self.assertEqual(view.mode, tuple)
self.assertEqual(
view.get_examples(None, None), dataset.get_examples(None, None))
self.assertEqual(view.convert(view.fetch()), 'converted')
@testing.parameterize(
{'mode': tuple},
{'mode': dict},
{'mode': None},
)
class TestAsdict(unittest.TestCase):
def test_asdict(self):
dataset = dummy_dataset.DummyDataset(mode=self.mode, convert=True)
view = dataset.asdict()
self.assertIsInstance(view, chainer.dataset.TabularDataset)
self.assertEqual(len(view), len(dataset))
self.assertEqual(view.keys, dataset.keys)
self.assertEqual(view.mode, dict)
self.assertEqual(
view.get_examples(None, None), dataset.get_examples(None, None))
self.assertEqual(view.convert(view.fetch()), 'converted')
testing.run_module(__name__, __file__)
| 1,451
| 29.893617
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/tabular_tests/test_from_data.py
|
import unittest
import numpy as np
import chainer
from chainer.dataset import tabular
from chainer import testing
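# tabular.from_data builds a TabularDataset from arrays, lists and callables,
# optionally paired with explicit keys; `size` must be given when only
# callables are supplied, since the length cannot be inferred (see the
# *_without_size tests below).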
class TestFromData(unittest.TestCase):
def test_unary_array(self):
dataset = tabular.from_data(np.arange(10))
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(len(dataset.keys), 1)
self.assertIsNone(dataset.mode)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, [1, 3])
self.assertIsInstance(output, np.ndarray)
def test_unary_array_with_key(self):
dataset = tabular.from_data(('a', np.arange(10)))
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(dataset.keys, ('a',))
self.assertIsNone(dataset.mode)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, [1, 3])
self.assertIsInstance(output, np.ndarray)
def test_unary_list(self):
dataset = tabular.from_data([2, 7, 1, 8, 4, 5, 9, 0, 3, 6])
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(len(dataset.keys), 1)
self.assertIsNone(dataset.mode)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, [7, 8])
self.assertIsInstance(output, list)
def test_unary_list_with_key(self):
dataset = tabular.from_data(('a', [2, 7, 1, 8, 4, 5, 9, 0, 3, 6]))
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(dataset.keys, ('a',))
self.assertIsNone(dataset.mode)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, [7, 8])
self.assertIsInstance(output, list)
def test_unary_callable_unary(self):
dataset = tabular.from_data(('a', lambda i: i * i), size=10)
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(dataset.keys, ('a',))
self.assertIsNone(dataset.mode)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, [1, 9])
self.assertIsInstance(output, list)
def test_unary_callable_tuple(self):
dataset = tabular.from_data(
(('a', 'b'), lambda i: (i * i, -i)), size=10)
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(dataset.keys, ('a', 'b'))
self.assertEqual(dataset.mode, tuple)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, ([1, 9], [-1, -3]))
for out in output:
self.assertIsInstance(out, list)
def test_unary_callable_dict(self):
dataset = tabular.from_data(
(('a', 'b'), lambda i: {'a': i * i, 'b': -i}), size=10)
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(dataset.keys, ('a', 'b'))
self.assertEqual(dataset.mode, dict)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, {'a': [1, 9], 'b': [-1, -3]})
for out in output.values():
self.assertIsInstance(out, list)
def test_unary_callable_without_key(self):
with self.assertRaises(ValueError):
tabular.from_data(lambda i: i * i, size=10)
def test_unary_callable_without_size(self):
with self.assertRaises(ValueError):
tabular.from_data(('a', lambda i: i * i))
def test_tuple_array_list(self):
dataset = tabular.from_data(
(np.arange(10), [2, 7, 1, 8, 4, 5, 9, 0, 3, 6]))
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(len(dataset.keys), 2)
self.assertEqual(dataset.mode, tuple)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, ([1, 3], [7, 8]))
self.assertIsInstance(output[0], np.ndarray)
self.assertIsInstance(output[1], list)
def test_tuple_array_with_key_list(self):
dataset = tabular.from_data(
(('a', np.arange(10)), [2, 7, 1, 8, 4, 5, 9, 0, 3, 6]))
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(len(dataset.keys), 2)
self.assertEqual(dataset.keys[0], 'a')
self.assertEqual(dataset.mode, tuple)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, ([1, 3], [7, 8]))
self.assertIsInstance(output[0], np.ndarray)
self.assertIsInstance(output[1], list)
def test_tuple_array_list_with_key(self):
dataset = tabular.from_data(
(np.arange(10), ('b', [2, 7, 1, 8, 4, 5, 9, 0, 3, 6])))
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(len(dataset.keys), 2)
self.assertEqual(dataset.keys[1], 'b')
self.assertEqual(dataset.mode, tuple)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, ([1, 3], [7, 8]))
self.assertIsInstance(output[0], np.ndarray)
self.assertIsInstance(output[1], list)
def test_tuple_array_callable_unary(self):
dataset = tabular.from_data((np.arange(10), ('b', lambda i: i * i)))
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(len(dataset.keys), 2)
self.assertEqual(dataset.keys[1], 'b')
self.assertEqual(dataset.mode, tuple)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, ([1, 3], [1, 9]))
self.assertIsInstance(output[0], np.ndarray)
self.assertIsInstance(output[1], list)
def test_tuple_array_callable_tuple(self):
dataset = tabular.from_data(
(np.arange(10), (('b', 'c'), lambda i: (i * i, -i))))
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(len(dataset.keys), 3)
self.assertEqual(dataset.keys[1:], ('b', 'c'))
self.assertEqual(dataset.mode, tuple)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, ([1, 3], [1, 9], [-1, -3]))
self.assertIsInstance(output[0], np.ndarray)
self.assertIsInstance(output[1], list)
def test_tuple_array_callable_dict(self):
dataset = tabular.from_data(
(np.arange(10), (('b', 'c'), lambda i: {'b': i * i, 'c': -i})))
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(len(dataset.keys), 3)
self.assertEqual(dataset.keys[1:], ('b', 'c'))
self.assertEqual(dataset.mode, tuple)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, ([1, 3], [1, 9], [-1, -3]))
self.assertIsInstance(output[0], np.ndarray)
self.assertIsInstance(output[1], list)
def test_tuple_array_with_key_callable_unary(self):
dataset = tabular.from_data(
(('a', np.arange(10)), ('b', lambda i: i * i)))
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(dataset.keys, ('a', 'b'))
self.assertEqual(dataset.mode, tuple)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, ([1, 3], [1, 9]))
self.assertIsInstance(output[0], np.ndarray)
self.assertIsInstance(output[1], list)
def test_tuple_callable_unary_callable_unary(self):
dataset = tabular.from_data(
(('a', lambda i: i * i), ('b', lambda i: -i)), size=10)
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(dataset.keys, ('a', 'b'))
self.assertEqual(dataset.mode, tuple)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, ([1, 9], [-1, -3]))
self.assertIsInstance(output[0], list)
self.assertIsInstance(output[1], list)
def test_tuple_callable_unary_callable_unary_without_size(self):
with self.assertRaises(ValueError):
tabular.from_data((('a', lambda i: i * i), ('b', lambda i: -i)))
def test_dict_array_list(self):
dataset = tabular.from_data(
{'a': np.arange(10), 'b': [2, 7, 1, 8, 4, 5, 9, 0, 3, 6]})
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(set(dataset.keys), {'a', 'b'})
self.assertEqual(dataset.mode, dict)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, {'a': [1, 3], 'b': [7, 8]})
self.assertIsInstance(output['a'], np.ndarray)
self.assertIsInstance(output['b'], list)
def test_dict_array_callable_unary(self):
dataset = tabular.from_data({'a': np.arange(10), 'b': lambda i: i * i})
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(set(dataset.keys), {'a', 'b'})
self.assertEqual(dataset.mode, dict)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, {'a': [1, 3], 'b': [1, 9]})
self.assertIsInstance(output['a'], np.ndarray)
self.assertIsInstance(output['b'], list)
def test_dict_array_callable_tuple(self):
dataset = tabular.from_data(
{'a': np.arange(10), ('b', 'c'): lambda i: (i * i, -i)})
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(set(dataset.keys), {'a', 'b', 'c'})
self.assertEqual(dataset.mode, dict)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(
output, {'a': [1, 3], 'b': [1, 9], 'c': [-1, -3]})
self.assertIsInstance(output['a'], np.ndarray)
self.assertIsInstance(output['b'], list)
self.assertIsInstance(output['c'], list)
def test_dict_array_callable_dict(self):
dataset = tabular.from_data(
{'a': np.arange(10), ('b', 'c'): lambda i: {'b': i * i, 'c': -i}})
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(set(dataset.keys), {'a', 'b', 'c'})
self.assertEqual(dataset.mode, dict)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(
output, {'a': [1, 3], 'b': [1, 9], 'c': [-1, -3]})
self.assertIsInstance(output['a'], np.ndarray)
self.assertIsInstance(output['b'], list)
self.assertIsInstance(output['c'], list)
def test_dict_callable_unary_callable_unary(self):
dataset = tabular.from_data(
{'a': lambda i: i * i, 'b': lambda i: -i}, size=10)
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), 10)
self.assertEqual(set(dataset.keys), {'a', 'b'})
self.assertEqual(dataset.mode, dict)
output = dataset.slice[[1, 3]].fetch()
np.testing.assert_equal(output, {'a': [1, 9], 'b': [-1, -3]})
self.assertIsInstance(output['a'], list)
self.assertIsInstance(output['b'], list)
def test_dict_callable_unary_callable_unary_without_size(self):
with self.assertRaises(ValueError):
tabular.from_data(({'a': lambda i: i * i, 'b': lambda i: -i}))
def test_unique(self):
dataset_a = tabular.from_data(np.arange(10))
dataset_b = tabular.from_data(np.arange(10))
self.assertNotEqual(dataset_a.keys, dataset_b.keys)
testing.run_module(__name__, __file__)
| 12,247
| 38.25641
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/tabular_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/dataset_tests/tabular_tests/test_delegate_dataset.py
|
import unittest
import chainer
from chainer.dataset import tabular
from chainer import testing
from chainer_tests.dataset_tests.tabular_tests import dummy_dataset
@testing.parameterize(
{'mode': tuple},
{'mode': dict},
{'mode': None},
)
class TestDelegateDataset(unittest.TestCase):
def test_delegate_dataset(self):
dataset = tabular.DelegateDataset(
dummy_dataset.DummyDataset(mode=self.mode))
self.assertIsInstance(dataset, chainer.dataset.TabularDataset)
self.assertEqual(len(dataset), len(dataset.dataset))
self.assertEqual(dataset.keys, dataset.dataset.keys)
self.assertEqual(dataset.mode, dataset.dataset.mode)
self.assertEqual(
dataset.get_example(3), dataset.dataset.get_example(3))
testing.run_module(__name__, __file__)
| 826
| 26.566667
| 70
|
py
|
chainer
|
chainer-master/tests/chainer_tests/testing_tests/test_condition.py
|
import unittest
import pytest
from chainer import testing
from chainer.testing import condition
SKIP_REASON = 'test skip reason'
# The test fixtures of this TestCase are meant to be decorated by the
# decorators under test, so we do not run them on their own.
class MockUnitTest(unittest.TestCase):
failure_case_counter = 0
success_case_counter = 0
skip_case_counter = 0
probabilistic_case_counter = 0
probabilistic_case_success_counter = 0
probabilistic_case_failure_counter = 0
@staticmethod
def clear_counter():
MockUnitTest.failure_case_counter = 0
MockUnitTest.success_case_counter = 0
MockUnitTest.skip_case_counter = 0
MockUnitTest.probabilistic_case_counter = 0
MockUnitTest.probabilistic_case_success_counter = 0
MockUnitTest.probabilistic_case_failure_counter = 0
def failure_case(self):
MockUnitTest.failure_case_counter += 1
self.fail()
def success_case(self):
MockUnitTest.success_case_counter += 1
self.assertTrue(True)
def skip_case(self):
MockUnitTest.skip_case_counter += 1
self.skipTest(SKIP_REASON)
def skip_case_pytest(self):
MockUnitTest.skip_case_counter += 1
pytest.skip(SKIP_REASON)
def error_case(self):
raise Exception()
def probabilistic_case(self):
MockUnitTest.probabilistic_case_counter += 1
if MockUnitTest.probabilistic_case_counter % 2 == 0:
MockUnitTest.probabilistic_case_success_counter += 1
self.assertTrue(True)
else:
MockUnitTest.probabilistic_case_failure_counter += 1
self.fail()
def runTest(self):
pass
def _should_fail(self, f):
try:
f(self.unit_test)
self.fail(
'AssertionError is expected to be raised, but none is raised')
except AssertionError as e:
# check if the detail is included in the error object
self.assertIn('first error message:', str(e))
def _should_pass(self, f):
f(self.unit_test)
def _should_skip(self, f):
try:
f(self.unit_test)
self.fail(
'SkipTest is expected to be raised, but none is raised')
except unittest.SkipTest as e:
self.assertIn(SKIP_REASON, str(e))
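# The helpers above are module-level functions that take the calling test
# case as `self`; the classes below invoke them as `_should_fail(self, f)`
# so that assertion failures are reported against the outer test.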
class TestRepeatWithSuccessAtLeast(unittest.TestCase):
def _decorate(self, f, times, min_success):
return condition.repeat_with_success_at_least(
times, min_success)(f)
def setUp(self):
self.unit_test = MockUnitTest()
MockUnitTest.clear_counter()
def test_all_trials_fail(self):
f = self._decorate(MockUnitTest.failure_case, 10, 1)
_should_fail(self, f)
self.assertEqual(self.unit_test.failure_case_counter, 10)
def test_all_trials_fail2(self):
f = self._decorate(MockUnitTest.failure_case, 10, 0)
_should_pass(self, f)
self.assertLessEqual(self.unit_test.failure_case_counter, 10)
def test_all_trials_error(self):
f = self._decorate(MockUnitTest.error_case, 10, 1)
_should_fail(self, f)
def test_all_trials_succeed(self):
f = self._decorate(MockUnitTest.success_case, 10, 10)
_should_pass(self, f)
self.assertEqual(self.unit_test.success_case_counter, 10)
def test_all_trials_succeed2(self):
self.assertRaises(AssertionError,
condition.repeat_with_success_at_least,
10, 11)
def test_half_of_trials_succeed(self):
f = self._decorate(MockUnitTest.probabilistic_case, 10, 5)
_should_pass(self, f)
self.assertLessEqual(self.unit_test.probabilistic_case_counter, 10)
self.assertGreaterEqual(
self.unit_test.probabilistic_case_success_counter, 5)
self.assertLessEqual(
self.unit_test.probabilistic_case_failure_counter, 5)
def test_half_of_trials_succeed2(self):
f = self._decorate(MockUnitTest.probabilistic_case, 10, 6)
_should_fail(self, f)
self.assertLessEqual(self.unit_test.probabilistic_case_counter, 10)
self.assertLess(
self.unit_test.probabilistic_case_success_counter, 6)
self.assertGreaterEqual(
self.unit_test.probabilistic_case_failure_counter, 5)
class TestRepeat(unittest.TestCase):
def _decorate(self, f, times):
return condition.repeat(times)(f)
def setUp(self):
self.unit_test = MockUnitTest()
MockUnitTest.clear_counter()
def test_failure_case(self):
f = self._decorate(MockUnitTest.failure_case, 10)
_should_fail(self, f)
self.assertLessEqual(self.unit_test.failure_case_counter, 10)
def test_success_case(self):
f = self._decorate(MockUnitTest.success_case, 10)
_should_pass(self, f)
self.assertEqual(self.unit_test.success_case_counter, 10)
def test_skip_case(self):
f = self._decorate(MockUnitTest.skip_case, 10)
_should_skip(self, f)
self.assertEqual(self.unit_test.skip_case_counter, 1)
def test_skip_case_pytest(self):
f = self._decorate(MockUnitTest.skip_case_pytest, 10)
_should_skip(self, f)
self.assertEqual(self.unit_test.skip_case_counter, 1)
def test_probabilistic_case(self):
f = self._decorate(MockUnitTest.probabilistic_case, 10)
_should_fail(self, f)
self.assertLessEqual(self.unit_test.probabilistic_case_counter, 10)
self.assertLess(self.unit_test.probabilistic_case_success_counter, 10)
self.assertGreater(
self.unit_test.probabilistic_case_failure_counter, 0)
class TestRetry(unittest.TestCase):
def _decorate(self, f, times):
return condition.retry(times)(f)
def setUp(self):
self.unit_test = MockUnitTest()
MockUnitTest.clear_counter()
def test_failure_case(self):
f = self._decorate(MockUnitTest.failure_case, 10)
_should_fail(self, f)
self.assertEqual(self.unit_test.failure_case_counter, 10)
def test_success_case(self):
f = self._decorate(MockUnitTest.success_case, 10)
_should_pass(self, f)
self.assertLessEqual(self.unit_test.success_case_counter, 10)
def test_skip_case(self):
f = self._decorate(MockUnitTest.skip_case, 10)
_should_skip(self, f)
self.assertEqual(self.unit_test.skip_case_counter, 1)
def test_skip_case_pytest(self):
f = self._decorate(MockUnitTest.skip_case_pytest, 10)
_should_skip(self, f)
self.assertEqual(self.unit_test.skip_case_counter, 1)
def test_probabilistic_case(self):
f = self._decorate(MockUnitTest.probabilistic_case, 10)
_should_pass(self, f)
self.assertLessEqual(
self.unit_test.probabilistic_case_counter, 10)
self.assertGreater(
self.unit_test.probabilistic_case_success_counter, 0)
self.assertLess(self.unit_test.probabilistic_case_failure_counter, 10)
testing.run_module(__name__, __file__)
| 7,056
| 31.37156
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/testing_tests/test_training.py
|
from __future__ import division
import math
import unittest
from chainer import testing
def _dummy_extension(trainer):
pass
@testing.parameterize(*testing.product({
'stop_trigger': [(5, 'iteration'), (5, 'epoch')],
'iter_per_epoch': [0.5, 1, 1.5, 5],
'extensions': [[], [_dummy_extension]]
}))
class TestGetTrainerWithMockUpdater(unittest.TestCase):
def setUp(self):
self.trainer = testing.get_trainer_with_mock_updater(
self.stop_trigger, self.iter_per_epoch,
extensions=self.extensions)
def test_run(self):
iteration = [0]
def check(trainer):
iteration[0] += 1
self.assertEqual(trainer.updater.iteration, iteration[0])
self.assertEqual(
trainer.updater.epoch, iteration[0] // self.iter_per_epoch)
self.assertEqual(
trainer.updater.epoch_detail,
iteration[0] / self.iter_per_epoch)
self.assertEqual(
trainer.updater.is_new_epoch,
(iteration[0] - 1) // self.iter_per_epoch !=
iteration[0] // self.iter_per_epoch)
self.assertEqual(
trainer.updater.previous_epoch_detail,
(iteration[0] - 1) / self.iter_per_epoch)
self.assertEqual(len(self.extensions), len(self.trainer._extensions))
self.trainer.extend(check)
self.trainer.run()
if self.stop_trigger[1] == 'iteration':
self.assertEqual(iteration[0], self.stop_trigger[0])
elif self.stop_trigger[1] == 'epoch':
self.assertEqual(
iteration[0],
math.ceil(self.stop_trigger[0] * self.iter_per_epoch))
testing.run_module(__name__, __file__)
| 1,768
| 28.983051
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/testing_tests/test_unary_math_function_test.py
|
import unittest
from chainer import function_node
from chainer import testing
def dummy():
pass
class TestNoNumpyFunction(unittest.TestCase):
def test_no_numpy_function(self):
with self.assertRaises(ValueError):
testing.unary_math_function_unittest(dummy) # no numpy.dummy
class DummyLinear(function_node.FunctionNode):
@property
def label(self):
return 'dummy_linear'
def forward(self, x):
return x[0],
def backward(self, indexes, gy):
return gy[0],
def dummy_linear(x):
return DummyLinear().apply((x,))[0]
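# unary_math_function_unittest generates forward/backward tests for a unary
# function. By default it compares against the NumPy function of the same
# name (hence the ValueError for `dummy` above); here `func_expected`
# supplies the identity, so the linear DummyLinear passes the checks.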
@testing.unary_math_function_unittest(dummy_linear,
func_expected=lambda x, dtype: x)
class TestIsLinear(unittest.TestCase):
pass
testing.run_module(__name__, __file__)
| 809
| 18.285714
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/testing_tests/test_parameterized.py
|
import unittest
from chainer import testing
@testing.parameterize(
{'actual': {'a': [1, 2], 'b': [3, 4, 5]},
'expect': [{'a': 1, 'b': 3}, {'a': 1, 'b': 4}, {'a': 1, 'b': 5},
{'a': 2, 'b': 3}, {'a': 2, 'b': 4}, {'a': 2, 'b': 5}]},
{'actual': {'a': [1, 2]}, 'expect': [{'a': 1}, {'a': 2}]},
{'actual': {'a': [1, 2], 'b': []}, 'expect': []},
{'actual': {'a': []}, 'expect': []},
{'actual': {}, 'expect': [{}]})
class ProductTest(unittest.TestCase):
def test_product(self):
self.assertListEqual(testing.product(self.actual), self.expect)
@testing.parameterize(
{'actual': [[{'a': 1, 'b': 3}, {'a': 2, 'b': 4}], [{'c': 5}, {'c': 6}]],
'expect': [{'a': 1, 'b': 3, 'c': 5}, {'a': 1, 'b': 3, 'c': 6},
{'a': 2, 'b': 4, 'c': 5}, {'a': 2, 'b': 4, 'c': 6}]},
{'actual': [[{'a': 1}, {'a': 2}], [{'b': 3}, {'b': 4}, {'b': 5}]],
'expect': [{'a': 1, 'b': 3}, {'a': 1, 'b': 4}, {'a': 1, 'b': 5},
{'a': 2, 'b': 3}, {'a': 2, 'b': 4}, {'a': 2, 'b': 5}]},
{'actual': [[{'a': 1}, {'a': 2}]], 'expect': [{'a': 1}, {'a': 2}]},
{'actual': [[{'a': 1}, {'a': 2}], []], 'expect': []},
{'actual': [[]], 'expect': []},
{'actual': [], 'expect': [{}]})
class ProductDictTest(unittest.TestCase):
def test_product_dict(self):
# TODO(kataoka): This check is tentative. See
# tests/chainer_tests/conftest.py
assert testing.product_dict is testing.parameterized._product_dict_orig
self.assertListEqual(testing.product_dict(*self.actual), self.expect)
def f(x):
return x
class C(object):
def __call__(self, x):
return x
def method(self, x):
return x
@testing.parameterize(
{'callable': f},
{'callable': lambda x: x},
{'callable': C()},
{'callable': C().method}
)
class TestParameterize(unittest.TestCase):
def test_callable(self):
y = self.callable(1)
self.assertEqual(y, 1)
def test_skip(self):
# Skipping the test case should not report error.
self.skipTest('skip')
@testing.parameterize(
{'param1': 1},
{'param1': 2})
@testing.parameterize(
{'param2': 3},
{'param2': 4})
class TestParameterizeTwice(unittest.TestCase):
# This test only checks if each of the parameterized combinations is a
# member of the expected combinations. This test does not check if each
# of the expected combinations is actually visited by the parameterization,
    # as there is no way to test that in a robust manner.
def test_twice(self):
assert hasattr(self, 'param1')
assert hasattr(self, 'param2')
assert (self.param1, self.param2) in (
(1, 3),
(1, 4),
(2, 3),
(2, 4))
testing.run_module(__name__, __file__)
| 2,822
| 29.031915
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/testing_tests/test_matrix.py
|
import unittest
import numpy
from chainer import testing
from chainer.testing import matrix
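# testing.generate_matrix should return matrices whose singular values match
# the requested ones. The test below verifies this by recomputing the values
# with numpy.linalg.svd (broadcasting the requested values where necessary).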
@testing.parameterize(*testing.product({
'dtype': [
numpy.float16, numpy.float32, numpy.float64,
numpy.complex64, numpy.complex128,
],
'x_s_shapes': [
((0, 0), (0,)),
((2, 2), (2,)),
((2, 3), (2,)),
((3, 2), (2,)),
((2, 3, 4), (2, 3)),
((2, 4, 3), (2, 3)),
((0, 2, 3), (0, 2)),
# broadcast
((2, 2), ()),
((2, 3, 4), (2, 1)),
],
}))
class TestGenerateMatrix(unittest.TestCase):
def test_generate_matrix(self):
dtype = self.dtype
x_shape, s_shape = self.x_s_shapes
sv = numpy.random.uniform(0.5, 1.5, s_shape).astype(dtype().real.dtype)
x = testing.generate_matrix(x_shape, dtype=dtype, singular_values=sv)
assert x.shape == x_shape
if 0 in x_shape:
return
s = numpy.linalg.svd(
x.astype(numpy.complex128), full_matrices=False, compute_uv=False,
)
sv = matrix._broadcast_to(sv, s.shape)
sv_sorted = numpy.sort(sv, axis=-1)[..., ::-1]
rtol = 1e-3 if dtype == numpy.float16 else 1e-7
numpy.testing.assert_allclose(s, sv_sorted, rtol=rtol)
class TestGenerateMatrixInvalid(unittest.TestCase):
def test_no_singular_values(self):
with self.assertRaises(TypeError):
testing.generate_matrix((2, 2))
def test_invalid_shape(self):
with self.assertRaises(ValueError):
testing.generate_matrix((2,), singular_values=1)
def test_invalid_dtype(self):
with self.assertRaises(TypeError):
testing.generate_matrix(
(2, 2), dtype=numpy.int32, singular_values=1)
def test_invalid_dtype_singular_values(self):
with self.assertRaises(TypeError):
testing.generate_matrix((2, 2), singular_values=1 + 0j)
def test_negative_singular_values(self):
with self.assertRaises(ValueError):
testing.generate_matrix((2, 2), singular_values=[1, -1])
def test_shape_mismatch(self):
with self.assertRaises(ValueError):
testing.generate_matrix(
(2, 2), singular_values=numpy.ones(3))
def test_shape_mismatch_2(self):
with self.assertRaises(ValueError):
testing.generate_matrix(
(0, 2, 2), singular_values=numpy.ones(3))
testing.run_module(__name__, __file__)
| 2,480
| 28.535714
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/testing_tests/test_array.py
|
import unittest
import numpy
import pytest
from chainer import testing
# TODO(niboshi): Add more assert_allclose tests
class TestAssertAllClose(unittest.TestCase):
def test_no_zero_division(self):
# No zero-division should occur when the relative error is inf (y=0).
# That would cause FloatingPointError with
# numpy.errstate(divide='raise').
with numpy.errstate(divide='raise'):
with pytest.raises(AssertionError):
x = numpy.array([1], numpy.float32)
y = numpy.array([0], numpy.float32)
testing.assert_allclose(x, y)
testing.run_module(__name__, __file__)
| 663
| 24.538462
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/testing_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/testing_tests/test_serializer.py
|
import unittest
from chainer.serializers import hdf5
from chainer import testing
class Serializable(object):
def __init__(self, value):
self.value = value
def serialize(self, serializer):
self.value = serializer('value', self.value)
class TestSaveAndLoad(unittest.TestCase):
def setUp(self):
self.src = Serializable(1)
self.dst = Serializable(2)
def test_save_and_load_npz(self):
testing.save_and_load_npz(self.src, self.dst)
self.assertEqual(self.dst.value, 1)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
def test_save_and_load_hdf5(self):
testing.save_and_load_hdf5(self.src, self.dst)
self.assertEqual(self.dst.value, 1)
testing.run_module(__name__, __file__)
| 782
| 22.727273
| 66
|
py
|
chainer
|
chainer-master/tests/chainer_tests/testing_tests/test_function_link.py
|
import unittest
import numpy
import pytest
import six
import chainer
from chainer import initializers
from chainer import testing
from chainer import utils
import chainerx
# Utilities for contiguousness tests.
#
# These tests check incoming array contiguousness.
# As it's not possible to assume contiguousness of incoming arrays consistently
# (because gradient_check passes contiguous arrays in numerical_grad),
# we instead simulate the test failure. The function implementation raises an
# error if an incoming array matches the expected contiguousness, and the
# test expects that failure.
class _ContiguousnessMatched(Exception):
pass
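# Computes Fortran-contiguousness from shape/strides/itemsize. It is needed
# because chainerx arrays expose only is_contiguous (C order) and have no
# flags attribute, so F-contiguity must be derived manually for them.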
def _is_f_contiguous(shape, strides, itemsize):
if numpy.prod(shape) <= 1:
return True
    for sh, st in zip(shape, strides):
if sh == 1:
continue
if st != itemsize:
return False
itemsize *= sh
return True
def _get_contiguousness(arr):
if isinstance(arr, chainerx.ndarray):
c_contig = arr.is_contiguous
f_contig = _is_f_contiguous(
arr.shape, arr.strides, arr.itemsize)
return (c_contig, f_contig)
return (arr.flags.c_contiguous, arr.flags.f_contiguous)
def _check_contiguousness(arr, expected_contiguous):
if isinstance(arr, chainer.Variable):
_check_contiguousness(arr.array, expected_contiguous)
return
c_contig, f_contig = _get_contiguousness(arr)
if numpy.prod(arr.shape) <= 1:
return # not applicable for this shape
if expected_contiguous is None:
# expected to be non-contiguous
if not c_contig and not f_contig:
raise _ContiguousnessMatched()
elif expected_contiguous == 'C':
# expected to be C-contiguous
if c_contig:
raise _ContiguousnessMatched()
else:
assert False
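# Checks a (possibly None) gradient: it must be None exactly when expected,
# and otherwise be an instance of the given array/Variable types.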
def _check_grad(grad, expect_grad_none, class_or_tuple):
if expect_grad_none:
assert grad is None
else:
        assert isinstance(grad, class_or_tuple)
def _check_grads(grads, expect_grads_none, class_or_tuple):
for grad, expect_grad_none in six.moves.zip(grads, expect_grads_none):
_check_grad(grad, expect_grad_none, class_or_tuple)
_inject_backend_tests = testing.inject_backend_tests(
None,
[
# CPU tests
{},
{'use_ideep': 'always'},
# GPU tests
{'use_cuda': True},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX tests
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
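# Analytic reference for the two-input, two-output test function used below:
#   y1 = (x1 + x2)**2
#   y2 = x1**2 * x2**2
# _backward_correct and _double_backward_correct are its hand-derived first-
# and second-order gradients and serve as the ground truth in these tests.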
def _forward_correct(x1, x2):
dt = x1.dtype.type
y1 = (x1 + x2) ** dt(2)
y2 = (x1 ** dt(2)) * (x2 ** dt(2))
return utils.force_array(y1), utils.force_array(y2)
def _backward_correct(x1, x2, gy1, gy2):
dt = x1.dtype.type
ggx1 = (
+ gy1 * dt(2) * (x1 + x2)
+ gy2 * dt(2) * x1 * x2 ** dt(2))
ggx2 = (
+ gy1 * dt(2) * (x1 + x2)
+ gy2 * dt(2) * x1 ** dt(2) * x2)
return ggx1, ggx2
def _double_backward_correct(x1, x2, gy1, gy2, ggx1, ggx2):
dt = x1.dtype.type
ggy1 = (ggx1 + ggx2) * dt(2) * (x1 + x2)
ggy2 = (ggx1 * x2 + ggx2 * x1) * dt(2) * x1 * x2
gx1 = (
+ ggx1 * (dt(2) * gy1 + dt(2) * x2 ** dt(2) * gy2)
+ ggx2 * (dt(2) * gy1 + dt(4) * x1 * x2 * gy2))
gx2 = (
+ ggx1 * (dt(2) * gy1 + dt(4) * x1 * x2 * gy2)
+ ggx2 * (dt(2) * gy1 + dt(2) * x1 ** dt(2) * gy2))
return gx1, gx2, ggy1, ggy2
# TestFunctionTestSuccessful
#
# This test checks for successful case.
# Incoming array types are also checked.
class FuncCorrectlyImplemented(chainer.FunctionNode):
def __init__(
self, device,
expect_grad_outputs_none=(False, False),
expect_grad_grad_inputs_none=(False, False)):
self.device = device
self.expect_grad_outputs_none = expect_grad_outputs_none
self.expect_grad_grad_inputs_none = expect_grad_grad_inputs_none
def forward(self, inputs):
device = self.device
x1, x2 = inputs
if device.xp is chainerx:
fallback_device = device.fallback_device
assert isinstance(x1, fallback_device.supported_array_types)
assert isinstance(x2, fallback_device.supported_array_types)
self.retain_inputs((0, 1))
y1, y2 = _forward_correct(x1, x2)
return utils.force_array(y1), utils.force_array(y2)
def backward(self, indexes, grad_outputs):
device = self.device
_check_grads(
grad_outputs, self.expect_grad_outputs_none,
device.supported_array_types)
x1, x2 = self.get_retained_inputs()
gy1, gy2 = grad_outputs
assert isinstance(x1.array, device.supported_array_types)
assert isinstance(x2.array, device.supported_array_types)
grad_func = FuncGradCorrectlyImplemented(
device,
self.expect_grad_outputs_none,
self.expect_grad_grad_inputs_none)
return grad_func.apply((x1, x2, gy1, gy2))
class FuncGradCorrectlyImplemented(chainer.FunctionNode):
def __init__(
self, device,
expect_grad_outputs_none,
expect_grad_grad_inputs_none):
self.device = device
self.expect_grad_outputs_none = expect_grad_outputs_none
self.expect_grad_grad_inputs_none = expect_grad_grad_inputs_none
def forward(self, inputs_and_grad_outputs):
device = self.device
x1, x2, gy1, gy2 = inputs_and_grad_outputs
if device.xp is chainerx:
fallback_device = device.fallback_device
_check_grads(
(gy1, gy2), self.expect_grad_outputs_none,
fallback_device.supported_array_types)
self.retain_inputs((0, 1, 2, 3))
ggx1, ggx2 = _backward_correct(
x1, x2,
0 if self.expect_grad_outputs_none[0] else gy1,
0 if self.expect_grad_outputs_none[1] else gy2)
return utils.force_array(ggx1), utils.force_array(ggx2)
def backward(self, indexes, grad_grad_inputs):
device = self.device
_check_grads(
grad_grad_inputs, self.expect_grad_grad_inputs_none,
chainer.Variable)
ggx1, ggx2 = grad_grad_inputs
x1, x2, gy1, gy2 = self.get_retained_inputs()
assert isinstance(x1, chainer.Variable)
assert isinstance(x2, chainer.Variable)
assert isinstance(x1.array, device.supported_array_types)
assert isinstance(x2.array, device.supported_array_types)
_check_grads(
(gy1, gy2), self.expect_grad_outputs_none, chainer.Variable)
        if not self.expect_grad_outputs_none[0]:
            assert isinstance(gy1.array, device.supported_array_types)
        if not self.expect_grad_outputs_none[1]:
            assert isinstance(gy2.array, device.supported_array_types)
gx1, gx2, ggy1, ggy2 = _double_backward_correct(
x1, x2,
0 if self.expect_grad_outputs_none[0] else gy1,
0 if self.expect_grad_outputs_none[1] else gy2,
0 if self.expect_grad_grad_inputs_none[0] else ggx1,
0 if self.expect_grad_grad_inputs_none[1] else ggx2)
return gx1, gx2, ggy1, ggy2
@testing.parameterize(*testing.product({
'shape': [(3, 2), (2,), (1,), (), (2, 0, 3)],
}))
@_inject_backend_tests
class TestFunctionTestSuccessful(testing.FunctionTestCase):
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
return x1, x2
def forward(self, inputs, device):
func = FuncCorrectlyImplemented(device)
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
@_inject_backend_tests
class TestFunctionTestSuccessfulNoneGrads(testing.FunctionTestCase):
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
return x1, x2
def generate_grad_outputs(self, output_templates):
grad_outputs = (
None,
(numpy.random.uniform(-1, 1, output_templates[1].shape)
.astype(output_templates[1].dtype)))
return grad_outputs
def generate_grad_grad_inputs(self, input_templates):
grad_inputs = (
(numpy.random.uniform(-1, 1, input_templates[0].shape)
.astype(input_templates[0].dtype)),
None)
return grad_inputs
def forward(self, inputs, device):
func = FuncCorrectlyImplemented(
device,
expect_grad_outputs_none=(True, False),
expect_grad_grad_inputs_none=(False, True))
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
# TestFunctionTestIncorrectForward
#
# This test checks if it can detect incorrect forward implementation.
class FuncWithIncorrectForward(chainer.FunctionNode):
def forward(self, inputs):
x1, x2 = inputs
y1, y2 = _forward_correct(x1, x2)
y1, y2 = utils.force_array(y1), utils.force_array(y2)
y2[...] += 1 # ! make incorrect
return y1, y2
def backward(self, *args, **kwargs):
assert False # should never be called
@testing.parameterize(*testing.product({
'shape': [(3, 2), (2,), (1,), ()],
}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectForward(testing.FunctionTestCase):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
return x1, x2
def forward(self, inputs, device):
func = FuncWithIncorrectForward()
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
# TestFunctionTestIncorrectBackward
#
# This test checks if it can detect incorrect backward implementation.
class FuncWithIncorrectBackward(chainer.FunctionNode):
def __init__(self, expect_grad_outputs_none=(False, False)):
self.expect_grad_outputs_none = expect_grad_outputs_none
def forward(self, inputs):
x1, x2 = inputs
y1, y2 = _forward_correct(x1, x2)
self.retain_inputs((0, 1))
return utils.force_array(y1), utils.force_array(y2)
def backward(self, indexes, grad_outputs):
gy1, gy2 = grad_outputs
x1, x2 = self.get_retained_inputs()
ggx1, ggx2 = _backward_correct(
x1, x2,
0 if self.expect_grad_outputs_none[0] else gy1,
0 if self.expect_grad_outputs_none[1] else gy2)
        ggx1 = ggx1 + 100000 # ! make incorrect
        ggx2 = ggx2 + 10000 # ! make incorrect
return utils.force_array(ggx1), utils.force_array(ggx2)
@testing.parameterize(*testing.product({
'shape': [(3, 2), (2,), (1,), ()],
}))
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectBackward(testing.FunctionTestCase):
skip_forward_test = True
skip_double_backward_test = True
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
return x1, x2
def forward(self, inputs, device):
func = FuncWithIncorrectBackward()
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectBackwardNoneGrads(testing.FunctionTestCase):
skip_forward_test = True
skip_double_backward_test = True
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
return x1, x2
def generate_grad_outputs(self, output_templates):
grad_outputs = (
None,
(numpy.random.uniform(-1, 1, output_templates[1].shape)
.astype(output_templates[1].dtype)))
return grad_outputs
def forward(self, inputs, device):
func = FuncWithIncorrectBackward(
expect_grad_outputs_none=(True, False))
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
# TestFunctionTestIncorrectDoubleBackward
#
# This test checks if it can detect incorrect double backward implementation.
class FuncWithIncorrectDoubleBackward(chainer.FunctionNode):
def __init__(
self,
expect_grad_outputs_none=(False, False),
expect_grad_grad_inputs_none=(False, False)):
self.expect_grad_outputs_none = expect_grad_outputs_none
self.expect_grad_grad_inputs_none = expect_grad_grad_inputs_none
def forward(self, inputs):
x1, x2 = inputs
y1, y2 = _forward_correct(x1, x2)
self.retain_inputs((0, 1))
return utils.force_array(y1), utils.force_array(y2)
def backward(self, indexes, grad_outputs):
x1, x2 = self.get_retained_inputs()
gy1, gy2 = grad_outputs
grad_func = FuncGradWithIncorrectDoubleBackward(
expect_grad_outputs_none=self.expect_grad_outputs_none,
expect_grad_grad_inputs_none=self.expect_grad_grad_inputs_none)
return grad_func.apply((x1, x2, gy1, gy2))
class FuncGradWithIncorrectDoubleBackward(chainer.FunctionNode):
def __init__(
self,
expect_grad_outputs_none=(False, False),
expect_grad_grad_inputs_none=(False, False)):
self.expect_grad_outputs_none = expect_grad_outputs_none
self.expect_grad_grad_inputs_none = expect_grad_grad_inputs_none
def forward(self, inputs_and_grad_outputs):
x1, x2, gy1, gy2 = inputs_and_grad_outputs
self.retain_inputs((0, 1, 2, 3))
ggx1, ggx2 = _backward_correct(
x1, x2,
0 if self.expect_grad_outputs_none[0] else gy1,
0 if self.expect_grad_outputs_none[1] else gy2)
return utils.force_array(ggx1), utils.force_array(ggx2)
def backward(self, indexes, grad_grad_inputs):
ggx1, ggx2 = grad_grad_inputs
x1, x2, gy1, gy2 = self.get_retained_inputs()
gx1, gx2, ggy1, ggy2 = _double_backward_correct(
x1, x2,
0 if self.expect_grad_outputs_none[0] else gy1,
0 if self.expect_grad_outputs_none[1] else gy2,
0 if self.expect_grad_grad_inputs_none[0] else ggx1,
0 if self.expect_grad_grad_inputs_none[1] else ggx2)
ggy2 = ggy2 + 10000 # ! make incorrect
return gx1, gx2, ggy1, ggy2
@testing.parameterize(*testing.product({
'shape': [(3, 2), (2,), (1,), ()],
}))
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectDoubleBackward(testing.FunctionTestCase):
skip_forward_test = True
skip_backward_test = True
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
return x1, x2
def forward(self, inputs, device):
func = FuncWithIncorrectDoubleBackward()
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectDoubleBackwardNoneGrads(
testing.FunctionTestCase):
skip_forward_test = True
skip_backward_test = True
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
return x1, x2
def generate_grad_outputs(self, output_templates):
grad_outputs = (
None,
(numpy.random.uniform(-1, 1, output_templates[1].shape)
.astype(output_templates[1].dtype)))
return grad_outputs
def generate_grad_grad_inputs(self, input_templates):
grad_inputs = (
(numpy.random.uniform(-1, 1, input_templates[0].shape)
.astype(input_templates[0].dtype)),
None)
return grad_inputs
def forward(self, inputs, device):
func = FuncWithIncorrectDoubleBackward(
expect_grad_outputs_none=(True, False),
expect_grad_grad_inputs_none=(False, True))
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
# FunctionTestCaseArrayContiguousnessTest
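#
# This test checks that FunctionTestCase passes arrays of the requested
# contiguousness to the function implementation at each probed check point.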
class FuncWithContiguousnessCheck(chainer.FunctionNode):
def __init__(self, contiguous, check_on):
self.contiguous = contiguous
self.check_on = check_on
def _check_contiguousness(self, arr):
assert isinstance(arr, chainer.get_array_types())
_check_contiguousness(arr, self.contiguous)
def forward(self, inputs):
x1, x2 = inputs
if self.check_on == 'forward_input':
self._check_contiguousness(x1)
self._check_contiguousness(x2)
self.retain_inputs((0, 1))
y1, y2 = _forward_correct(x1, x2)
return utils.force_array(y1), utils.force_array(y2)
def backward(self, indexes, grad_outputs):
x1, x2 = self.get_retained_inputs()
gy1, gy2 = grad_outputs
if self.check_on == 'backward_retained_input':
self._check_contiguousness(x1.array)
self._check_contiguousness(x2.array)
elif self.check_on == 'backward_grad_output':
self._check_contiguousness(gy1.array)
self._check_contiguousness(gy2.array)
grad_func = FuncGradWithContiguousnessCheck(
self.contiguous, self.check_on)
return grad_func.apply((x1, x2, gy1, gy2))
class FuncGradWithContiguousnessCheck(chainer.FunctionNode):
def __init__(self, contiguous, check_on):
self.contiguous = contiguous
self.check_on = check_on
def _check_contiguousness(self, arr):
testing.function_link._check_contiguousness(arr, self.contiguous)
def forward(self, inputs_and_grad_outputs):
x1, x2, gy1, gy2 = inputs_and_grad_outputs
self.retain_inputs((0, 1, 2, 3))
ggx1, ggx2 = _backward_correct(x1, x2, gy1, gy2)
return utils.force_array(ggx1), utils.force_array(ggx2)
def backward(self, indexes, grad_grad_inputs):
ggx1, ggx2 = grad_grad_inputs
if self.check_on == 'double_backward_grad_grad_input':
self._check_contiguousness(ggx1)
self._check_contiguousness(ggx2)
x1, x2, gy1, gy2 = self.get_retained_inputs()
gx1, gx2, ggy1, ggy2 = _double_backward_correct(
x1, x2, gy1, gy2, ggx1, ggx2)
return gx1, gx2, ggy1, ggy2
@testing.parameterize(*testing.product({
'shape': [(3, 2), (2,), (1, 2)],
'contiguous': [None, 'C'],
'check_on': [ # Check points in which contiguousness is probed.
'forward_input',
# TODO(niboshi): As gradient_check.check_backward currently copies the
# grads without preserving strides, they cannot be non-contiguous.
        # Enable this check after check_backward is fixed.
# 'backward_grad_output',
'backward_retained_input',
        # TODO(niboshi): Enable this check after check_backward is fixed.
# 'double_backward_grad_grad_input',
]}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=_ContiguousnessMatched)
class FunctionTestCaseArrayContiguousnessTest(testing.FunctionTestCase):
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
return x1, x2
def forward(self, inputs, device):
func = FuncWithContiguousnessCheck(self.contiguous, self.check_on)
return func.apply(inputs)
def forward_expected(self, inputs):
return _forward_correct(*inputs)
def before_test(self, test_name):
# Some combinations of test methods and check points are irrelevant.
# Skip such combinations.
        # For example, `test_forward` does not generate grad_outputs.
if test_name == 'test_forward':
if self.check_on != 'forward_input':
raise unittest.SkipTest()
if test_name == 'test_backward':
if self.check_on == 'double_backward_grad_grad_input':
raise unittest.SkipTest()
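# LinkTestCase / LinkInitializersTestCase tests.
#
# These use a simple dot link (y = x.dot(p)) with a single parameter p, plus
# deliberately broken variants to check that the harness detects the errors.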
class Dot(chainer.FunctionNode):
def __init__(
self, incorrect_forward=False, incorrect_backward_gx=False,
incorrect_backward_gp=False, contiguous=None,
check_on=None):
self.incorrect_forward = incorrect_forward
self.incorrect_backward_gx = incorrect_backward_gx
self.incorrect_backward_gp = incorrect_backward_gp
self.contiguous = contiguous
self.check_on = check_on
def forward(self, inputs):
self.retain_inputs((0, 1))
xp = chainer.backend.get_array_module(*inputs)
x, p = inputs
if self.check_on == 'forward_input':
self._check_contiguousness(x)
self._check_contiguousness(p)
y = xp.dot(x, p)
if self.incorrect_forward:
y *= 9999
return y,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
x, p = self.get_retained_inputs()
if self.check_on == 'backward_retained_input':
self._check_contiguousness(x.array)
self._check_contiguousness(p.array)
elif self.check_on == 'backward_grad_output':
self._check_contiguousness(gy.array)
gx = chainer.functions.matmul(gy, p.T)
gp = chainer.functions.matmul(x.T, gy)
if self.incorrect_backward_gx:
gx /= 2
if self.incorrect_backward_gp:
gp += 1000
return gx, gp
def _check_contiguousness(self, arr):
assert isinstance(arr, chainer.get_array_types())
_check_contiguousness(arr, self.contiguous)
class DotLink(chainer.Link):
"""correctly implemented dot."""
def __init__(
self, in_size, out_size, initial_p=None, contiguous=None,
check_on=None):
super(DotLink, self).__init__()
with self.init_scope():
if initial_p is None:
initial_p = initializers.Constant(1)
self.p = chainer.Parameter(initial_p, shape=(in_size, out_size))
self.contiguous = contiguous
self.check_on = check_on
def forward(self, inputs):
x = inputs
p = self.p
contiguous = self.contiguous
check_on = self.check_on
y, = Dot(contiguous=contiguous, check_on=check_on).apply((x, p))
return y
class DotLinkIncorrectForward(DotLink):
"""Incorrectly implemented dot (forward)."""
def __init__(self, *args, **kwargs):
super(DotLinkIncorrectForward, self).__init__(*args, **kwargs)
def forward(self, inputs):
x = inputs
p = self.p
y, = Dot(incorrect_forward=True).apply((x, p))
return y
class DotLinkIncorrectBackward(DotLink):
"""Incorrect implementation of dot (backward)."""
def __init__(self, incorrect_gx, incorrect_gp, *args, **kwargs):
super(DotLinkIncorrectBackward, self).__init__(*args, **kwargs)
self.incorrect_gx = incorrect_gx
self.incorrect_gp = incorrect_gp
def forward(self, inputs):
x = inputs
p = self.p
y, = Dot(
incorrect_backward_gx=self.incorrect_gx,
incorrect_backward_gp=self.incorrect_gp).apply((x, p))
return y
class DotLinkIncorrectInitialization(DotLink):
"""Incorrect implementation of dot (parameter initialization)."""
def __init__(self, in_size, out_size, initial_p=None):
# Ignores given initializer here.
super(DotLinkIncorrectInitialization, self).__init__(
in_size, out_size, initializers.Constant(0))
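# Shared fixture for the link tests: a dot link mapping 2 input features to 3
# output features, with a single parameter p and a batch of one input.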
class DotLinkTestBase(object):
param_names = ('p',)
def setUp(self):
self.n = 1
self.in_size = 2
self.out_size = 3
self.dtype = numpy.float32
def generate_params(self):
in_size = self.in_size
out_size = self.out_size
return numpy.random.uniform(
-1, 1, (in_size, out_size)).astype(self.dtype),
def create_link(self, initializers):
initial_p, = initializers
in_size = self.in_size
out_size = self.out_size
return DotLink(in_size, out_size, initial_p)
def generate_inputs(self):
return numpy.random.rand(self.n, self.in_size).astype(self.dtype),
# Required for forward backward tests.
def forward_expected(self, link, inputs):
p = link.p.array
x, = inputs
return numpy.dot(x, p),
    # Required for initializers test.
def get_initializers(self):
return [
initializers.Constant(0), 2,
testing.InitializerArgument(None, initializers.Constant(1))],
@_inject_backend_tests
class TestLinkCorrect(DotLinkTestBase, testing.LinkTestCase):
pass
@_inject_backend_tests
class TestLinkInitializersCorrect(
DotLinkTestBase, testing.LinkInitializersTestCase):
pass
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.LinkTestError)
class TestLinkIncorrectForward(DotLinkTestBase, testing.LinkTestCase):
skip_backward_test = True
def create_link(self, initializers):
initial_p, = initializers
in_size = self.in_size
out_size = self.out_size
link = DotLinkIncorrectForward(in_size, out_size, initial_p)
return link
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.LinkTestError)
class TestLinkIncorrectBackwardInput(DotLinkTestBase, testing.LinkTestCase):
skip_forward_test = True
def create_link(self, initializers):
initial_p, = initializers
in_size = self.in_size
out_size = self.out_size
link = DotLinkIncorrectBackward(
True, False, in_size, out_size, initial_p)
return link
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.LinkTestError)
class TestLinkIncorrectBackwardParam(DotLinkTestBase, testing.LinkTestCase):
skip_forward_test = True
def create_link(self, initializers):
initial_p, = initializers
in_size = self.in_size
out_size = self.out_size
link = DotLinkIncorrectBackward(
False, True, in_size, out_size, initial_p)
return link
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=TypeError)
class TestLinkIncorrectCreateLink(DotLinkTestBase, testing.LinkTestCase):
def create_link(self, initializers):
# Invalid return type (that is not an instance of chainer.Link).
return numpy.array([1])
@testing.parameterize(*testing.product({
'invalid_forward_backward_initializer': [
chainer.Variable(numpy.array([1])),
chainer.Parameter(numpy.array([1])),
]}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=TypeError)
class TestLinkIncorrectForwardBackwardInitializers(
DotLinkTestBase, testing.LinkTestCase):
def generate_params(self):
return self.invalid_forward_backward_initializer,
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.LinkTestError)
class TestLinkIncorrectBackwardInitializers(
DotLinkTestBase, testing.LinkInitializersTestCase):
def create_link(self, initializers):
initial_p, = initializers
in_size = self.in_size
out_size = self.out_size
link = DotLinkIncorrectInitialization(in_size, out_size, initial_p)
return link
@testing.parameterize(*testing.product({
'invalid_initializer': [
chainer.Variable(numpy.array([1])),
chainer.Parameter(numpy.array([1])),
]}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=TypeError)
class TestLinkIncorrectInitializers(
DotLinkTestBase, testing.LinkInitializersTestCase):
def get_initializers(self):
return [self.invalid_initializer],
@testing.parameterize(*testing.product({
'contiguous': [None, 'C'],
'check_on': [ # Check points in which contiguousness is probed.
'forward_input',
# TODO(hvy): As gradient_check.check_backward currently copies the
# grads without preserving strides, they cannot be non-contiguous.
        # Enable this check after check_backward is fixed.
# 'backward_grad_output',
'backward_retained_input',
        # TODO(hvy): Enable this check after check_backward is fixed.
# 'double_backward_grad_grad_input',
]}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=_ContiguousnessMatched)
class TestLinkContiguousness(DotLinkTestBase, testing.LinkTestCase):
def before_test(self, test_name):
# Some combinations of test methods and check points are irrelevant.
# Skip such combinations.
        # For example, `test_forward` does not generate grad_outputs.
if test_name == 'test_forward':
if self.check_on != 'forward_input':
raise unittest.SkipTest()
def create_link(self, initializers):
initial_p, = initializers
in_size = self.in_size
out_size = self.out_size
contiguous = self.contiguous
check_on = self.check_on
link = DotLink(
in_size, out_size, initial_p, contiguous=contiguous,
check_on=check_on)
return link
testing.run_module(__name__, __file__)
| 30,426
| 32.036916
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/exporters_tests/test_caffe.py
|
import os
import unittest
import warnings
import numpy
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import testing
# The caffe submodule relies on protobuf, which under protobuf==3.7.0 and
# Python 3.7 raises a DeprecationWarning from the collections module.
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
from chainer.exporters import caffe
# @testing.parameterize([
# {'layer': 'LinearFunction'},
# {'layer': 'Reshape'},
# {'layer': 'Convolution2DFunction'},
# {'layer': 'Deconvolution2DFunction'},
# {'layer': 'AveragePooling2D'},
# {'layer': 'MaxPooling2D'},
# {'layer': 'BatchNormalization'},
# {'layer': 'ReLU'},
# {'layer': 'LeakyReLU'},
# {'layer': 'Softmax'},
# {'layer': 'Sigmoid'},
# {'layer': 'Add'},
# ])
class TestCaffeExport(unittest.TestCase):
def setUp(self):
x = numpy.random.uniform(-1, 1, (1, 3, 7, 7)).astype(numpy.float32)
self.x = chainer.Variable(x)
def test_caffe_export_model(self):
class Model(chainer.Chain):
def __init__(self):
super(Model, self).__init__()
with self.init_scope():
self.l1 = L.Convolution2D(None, 1, 1, 1, 0, groups=1)
self.b2 = L.BatchNormalization(1, eps=1e-2)
self.l3 = L.Deconvolution2D(None, 1, 1, 1, 0, groups=1)
self.l4 = L.Linear(None, 1)
def forward(self, x):
h = F.relu(self.l1(x))
h = self.b2(h)
h = self.l3(h)
return self.l4(h)
assert_export_import_match(Model(), self.x)
def test_Reshape(self):
class Link(chainer.Chain):
def forward(self, x):
return F.reshape(x, (-1,))
assert_export_import_match(Link(), self.x)
def test_LinearFunction(self):
W = numpy.random.uniform(-1, 1, (1, numpy.prod(self.x.shape[1:])))
class Link(chainer.Chain):
def __call__(self, x):
return F.linear(x, W)
assert_export_import_match(Link(), self.x)
def test_AveragePooling2D(self):
class Link(chainer.Chain):
def forward(self, x):
return F.average_pooling_2d(x, 1, 1, 0)
assert_export_import_match(Link(), self.x)
def test_MaxPooling2D(self):
class Link(chainer.Chain):
def forward(self, x):
return F.max_pooling_2d(x, 1, 1, 0)
assert_export_import_match(Link(), self.x)
def test_LeakyReLU(self):
class Link(chainer.Chain):
def __call__(self, x):
return F.leaky_relu(x, slope=0.1)
assert_export_import_match(Link(), self.x)
def test_Softmax(self):
class Link(chainer.Chain):
def forward(self, x):
return F.softmax(x)
assert_export_import_match(Link(), self.x)
def test_Sigmoid(self):
class Link(chainer.Chain):
def forward(self, x):
return F.sigmoid(x)
assert_export_import_match(Link(), self.x)
def test_Add(self):
class Link(chainer.Chain):
def forward(self, x):
return x + x
assert_export_import_match(Link(), self.x)
def assert_export_import_match(l1, x):
"""Asserts that results from original Link and re-imported Link are close.
"""
l2 = export_and_import(l1, (x,), True)
inputs = {'data': x}
outputs = [l2.layers[-1][0]]
with chainer.using_config('train', False):
for v1, v2 in zip(l1(x), l2(inputs=inputs, outputs=outputs)[0]):
testing.assert_allclose(v1.data, v2.data)
def export_and_import(l, args, export_params=True, graph_name='test'):
"""Exports the given Link as Caffe model and returns the re-imported Link.
"""
with chainer.utils.tempdir() as tempdir:
caffe.export(l, args, tempdir, export_params, graph_name)
prototxt = os.path.join(tempdir, 'chainer_model.prototxt')
caffemodel = os.path.join(tempdir, 'chainer_model.caffemodel')
assert os.path.exists(prototxt)
assert os.path.exists(caffemodel)
# with open(prototxt) as f: print(f.read())
return L.caffe.CaffeFunction(caffemodel)
testing.run_module(__name__, __file__)
| 4,411
| 28.218543
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/exporters_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_categorical.py
|
import numpy
from chainer import cuda
from chainer import distributions
from chainer import testing
from chainer.testing import array
@testing.parameterize(*testing.product({
'shape': [(3, 2), (1,)],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
# 'extreme_values': [True, False],
'extreme_values': [False],
'logit_option': [True, False]
}))
@testing.fix_random()
@testing.with_requires('scipy>=0.19.0')
class TestCategorical(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Categorical
self.scipy_dist = stats.multinomial
self.test_targets = set([
'batch_shape', 'event_shape', 'entropy', 'log_prob', 'sample'])
if self.logit_option:
if self.extreme_values:
logit = -numpy.inf \
* numpy.ones((3,)+self.shape).astype(numpy.float32)
logit[0] = 0.
logit = numpy.rollaxis(logit, 0, logit.ndim)
else:
logit = numpy.random.normal(
size=self.shape+(3,)).astype(numpy.float32)
p = numpy.exp(logit)
p /= numpy.expand_dims(p.sum(axis=-1), axis=-1)
self.params = {'logit': logit}
else:
if self.extreme_values:
p = numpy.zeros((3,)+self.shape).astype(numpy.float32)
p[0] = 1.
p = numpy.rollaxis(p, 0, p.ndim)
else:
logit = numpy.random.normal(
size=self.shape+(3,)).astype(numpy.float32)
p = numpy.exp(logit)
p /= numpy.expand_dims(p.sum(axis=-1), axis=-1)
self.params = {'p': p}
n = numpy.ones(self.shape)
self.scipy_params = {'n': n, 'p': p}
self.continuous = False
self.old_settings = None
if self.extreme_values:
self.old_settings = numpy.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
if self.old_settings is not None:
numpy.seterr(**self.old_settings)
def sample_for_test(self):
smp = numpy.random.randint(
0, 3, self.sample_shape + self.shape).astype(numpy.int32)
return smp
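    # scipy.stats.multinomial expects one-hot count vectors, so the sampled
    # class indices are converted with numpy.eye(3) before calling logpmf.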
def check_log_prob(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
log_prob1 = self.gpu_dist.log_prob(cuda.to_gpu(smp)).data
else:
log_prob1 = self.cpu_dist.log_prob(smp).data
scipy_prob = self.scipy_dist.logpmf
onebyone_smp = smp.reshape(
(int(numpy.prod(self.sample_shape)),
numpy.prod(self.shape),
int(numpy.prod(self.event_shape))))
onebyone_smp = numpy.swapaxes(onebyone_smp, 0, 1)
onebyone_smp = onebyone_smp.reshape((-1,) + self.sample_shape
+ self.event_shape)
log_prob2 = []
for one_params, one_smp in zip(
self.scipy_onebyone_params_iter(), onebyone_smp):
one_smp = numpy.eye(3)[one_smp]
log_prob2.append(scipy_prob(one_smp, **one_params))
log_prob2 = numpy.vstack(log_prob2)
log_prob2 = log_prob2.reshape(numpy.prod(self.shape), -1).T
log_prob2 = log_prob2.reshape(self.sample_shape + self.shape)
array.assert_allclose(log_prob1, log_prob2)
def check_sample(self, is_gpu):
if is_gpu:
smp1 = self.gpu_dist.sample(
sample_shape=(100000,)+self.sample_shape).data
else:
smp1 = self.cpu_dist.sample(
sample_shape=(100000,)+self.sample_shape).data
smp2 = []
for one_params in self.scipy_onebyone_params_iter():
smp2.append(self.scipy_dist.rvs(
size=(100000,)+self.sample_shape, **one_params))
smp2 = numpy.vstack(smp2)
smp2 = smp2.dot(numpy.arange(3))
smp2 = smp2.reshape((numpy.prod(self.shape), 100000)
+ self.sample_shape
+ self.cpu_dist.event_shape)
smp2 = numpy.rollaxis(
smp2, 0, smp2.ndim-len(self.cpu_dist.event_shape))
smp2 = smp2.reshape((100000,) + self.sample_shape + self.shape
+ self.cpu_dist.event_shape)
array.assert_allclose(smp1.mean(axis=0), smp2.mean(axis=0),
atol=3e-2, rtol=3e-2)
array.assert_allclose(smp1.std(axis=0), smp2.std(axis=0),
atol=3e-2, rtol=3e-2)
testing.run_module(__name__, __file__)
| 4,637
| 35.234375
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_one_hot_categorical.py
|
import numpy
from chainer import cuda
from chainer import distributions
from chainer import testing
def _numpy_stack(xs, axis):
try:
return numpy.stack(xs, axis)
except AttributeError:
# in case numpy<1.10, which does not have numpy.stack
return numpy.concatenate(
[numpy.expand_dims(x, axis) for x in xs],
axis=axis)
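# numpy.random.multinomial validates that pvals sum to at most 1; casting to
# float64 avoids spurious failures caused by float32 rounding.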
def _numpy_random_multinomial(n, pvals, size):
pvals = pvals.astype(numpy.float64)
return numpy.random.multinomial(n, pvals, size)
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
'k': [3],
}))
@testing.fix_random()
@testing.with_requires('scipy>=0.19')
class TestOneHotCategorical(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.OneHotCategorical
self.scipy_dist = stats.multinomial
self.test_targets = set([
'batch_shape', 'event_shape', 'mean', 'sample'])
n = numpy.ones(self.shape).astype(numpy.int32)
p = numpy.random.normal(
size=self.shape+(self.k,)).astype(numpy.float32)
p = numpy.exp(p)
p /= p.sum(axis=-1, keepdims=True)
self.n, self.p = n, p
self.params = {'p': p}
self.scipy_params = {'n': n, 'p': p}
self.continuous = False
self.event_shape = (self.k,)
def sample_for_test(self):
obo_p = self.p.reshape(-1, self.k)
obo_n = self.n.reshape(-1)
smp = [_numpy_random_multinomial(one_n, one_p, size=self.sample_shape)
for one_n, one_p in zip(obo_n, obo_p)]
smp = _numpy_stack(smp, axis=-2)
smp = smp.reshape(self.sample_shape + self.shape + (self.k,))
return smp
def check_log_prob(self, is_gpu):
smp = self.sample_for_test()
if is_gpu:
log_prob1 = self.gpu_dist.log_prob(cuda.to_gpu(smp)).data
else:
log_prob1 = self.cpu_dist.log_prob(smp).data
onebyone_smp = smp.reshape(self.sample_shape + (-1,) + (self.k,))
onebyone_smp = numpy.rollaxis(onebyone_smp, -2, 0)
onebyone_smp = onebyone_smp.reshape(
(-1,) + self.sample_shape + (self.k,))
log_prob2 = []
for one_params, one_smp in zip(
self.scipy_onebyone_params_iter(), onebyone_smp):
log_prob2.append(self.scipy_dist.logpmf(one_smp, **one_params))
log_prob2 = _numpy_stack(log_prob2, axis=-1)
log_prob2 = log_prob2.reshape(self.sample_shape + self.shape)
testing.assert_allclose(log_prob1, log_prob2)
def test_log_prob_cpu(self):
self.check_log_prob(False)
@testing.attr.gpu
def test_log_prob_gpu(self):
self.check_log_prob(True)
testing.run_module(__name__, __file__)
| 2,892
| 30.445652
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_normal.py
|
import numpy
from chainer import distributions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
'log_scale_option': [True, False],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestNormal(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Normal
self.scipy_dist = stats.norm
self.test_targets = set([
'batch_shape', 'cdf', 'entropy', 'event_shape', 'icdf', 'log_cdf',
'log_prob', 'log_survival', 'mean', 'prob', 'sample', 'stddev',
'support', 'survival', 'variance'])
loc = utils.force_array(
numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32))
if self.log_scale_option:
log_scale = utils.force_array(
numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32))
scale = numpy.exp(log_scale)
self.params = {'loc': loc, 'log_scale': log_scale}
self.scipy_params = {'loc': loc, 'scale': scale}
else:
scale = utils.force_array(numpy.exp(
numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32))
self.params = {'loc': loc, 'scale': scale}
self.scipy_params = {'loc': loc, 'scale': scale}
def sample_for_test(self):
smp = numpy.random.normal(
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| 1,680
| 31.960784
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_independent.py
|
import functools
import itertools
import operator
import numpy
from chainer import distributions
from chainer import testing
from chainer.testing import array
from chainer.testing import attr
from chainer import utils
def skip_not_in_params(property):
def decorator(f):
@functools.wraps(f)
def new_f(self, *args, **kwargs):
if property not in self.params.keys():
self.skipTest(
"\'%s\' does not exist in params.keys()." % property)
else:
f(self, *args, **kwargs)
return new_f
return decorator
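# Enumerates (batch shape, event shape, reinterpreted_batch_ndims)
# combinations, dropping those where reinterpreted_batch_ndims exceeds the
# batch rank, and attaches the resulting shape info to each parameter set.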
def _generate_valid_shape_pattern(
inner_shape, inner_event_shape, reinterpreted_batch_ndims):
shape_pattern = []
for bs, es, m in itertools.product(
inner_shape, inner_event_shape, reinterpreted_batch_ndims):
if (m is not None) and (m > len(bs)):
continue
shape_pattern.append({
'full_shape': bs + es,
'inner_shape': bs,
'inner_event_shape': es,
'reinterpreted_batch_ndims': m
})
return shape_pattern
def _generate_test_parameter(
parameter_list, inner_shape, inner_event_shape,
reinterpreted_batch_ndims):
shape_pattern = _generate_valid_shape_pattern(
inner_shape, inner_event_shape, reinterpreted_batch_ndims)
return [
dict(dicts[0], **dicts[1])
for dicts in itertools.product(parameter_list, shape_pattern)
]
@testing.parameterize(*_generate_test_parameter(
testing.product({
'sample_shape': [(3, 2), ()],
'is_variable': [True, False]
}),
inner_shape=[(4, 5), (5,), ()],
inner_event_shape=[()],
reinterpreted_batch_ndims=[1, 0, None]
))
@testing.fix_random()
@testing.with_requires('scipy')
class TestIndependentNormal(testing.distribution_unittest):
scipy_onebyone = True
def _build_inner_distribution(self):
pass
def setUp_configure(self):
from scipy import stats
self.dist = lambda **params: distributions.Independent(
distributions.Normal(**params), self.reinterpreted_batch_ndims)
self.test_targets = set([
"batch_shape", "entropy", "event_shape", "log_prob",
"support"])
loc = utils.force_array(numpy.random.uniform(
-1, 1, self.full_shape).astype(numpy.float32))
scale = utils.force_array(numpy.exp(numpy.random.uniform(
-1, 1, self.full_shape)).astype(numpy.float32))
if self.reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = max(0, len(self.inner_shape) - 1)
else:
reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
batch_ndim = len(self.inner_shape) - reinterpreted_batch_ndims
self.shape = self.inner_shape[:batch_ndim]
self.event_shape = \
self.inner_shape[batch_ndim:] + self.inner_event_shape
d = functools.reduce(operator.mul, self.event_shape, 1)
if self.event_shape == ():
self.scipy_dist = stats.norm
self.params = {"loc": loc, "scale": scale}
self.scipy_params = {"loc": loc, "scale": scale}
else:
self.scipy_dist = stats.multivariate_normal
scale_tril = numpy.eye(d).astype(numpy.float32) * \
scale.reshape(self.shape + (d,))[..., None]
cov = numpy.einsum('...ij,...jk->...ik', scale_tril, scale_tril)
self.params = {"loc": loc, "scale": scale}
self.scipy_params = {"mean": numpy.reshape(
loc, self.shape + (d,)), "cov": cov}
def sample_for_test(self):
smp = numpy.random.normal(
size=self.sample_shape + self.full_shape
).astype(numpy.float32)
return smp
def test_batch_ndim_error(self):
with self.assertRaises(ValueError):
distributions.Independent(
distributions.Normal(**self.params),
len(self.inner_shape) + 1)
def check_covariance(self, is_gpu):
if is_gpu:
cov1 = self.gpu_dist.covariance.array
else:
cov1 = self.cpu_dist.covariance.array
cov2 = self.params['cov']
array.assert_allclose(cov1, cov2)
@skip_not_in_params('cov')
def test_covariance_cpu(self):
self.check_covariance(False)
@skip_not_in_params('cov')
@attr.gpu
def test_covariance_gpu(self):
self.check_covariance(True)
testing.run_module(__name__, __file__)
| 4,547
| 30.365517
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_cauchy.py
|
import numpy
from chainer.backends import cuda
from chainer import distributions
from chainer import testing
from chainer.testing import array
from chainer.testing import attr
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestCauchy(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Cauchy
self.scipy_dist = stats.cauchy
self.test_targets = set(['batch_shape', 'cdf', 'entropy',
'event_shape', 'icdf', 'log_prob',
'support'])
loc = utils.force_array(
numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32))
scale = utils.force_array(numpy.exp(
numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32))
self.params = {'loc': loc, 'scale': scale}
self.scipy_params = {'loc': loc, 'scale': scale}
def sample_for_test(self):
smp = numpy.random.normal(
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
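    # The mean and variance of the Cauchy distribution are undefined, so
    # these checks expect a RuntimeWarning and compare against scipy's (NaN)
    # result; sampling is validated via the median for the same reason.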
def check_mean(self, is_gpu):
with testing.assert_warns(RuntimeWarning):
if is_gpu:
mean1 = self.gpu_dist.mean.data
else:
mean1 = self.cpu_dist.mean.data
if self.scipy_onebyone:
mean2 = []
for one_params in self.scipy_onebyone_params_iter():
mean2.append(self.scipy_dist.mean(**one_params))
mean2 = numpy.vstack(mean2).reshape(
self.shape + self.cpu_dist.event_shape)
else:
mean2 = self.scipy_dist.mean(**self.scipy_params)
array.assert_allclose(mean1, mean2)
def test_mean_cpu(self):
self.check_mean(False)
@attr.gpu
def test_mean_gpu(self):
self.check_mean(True)
def check_sample(self, is_gpu):
if is_gpu:
smp1 = self.gpu_dist.sample(
sample_shape=(100000,)+self.sample_shape).data
smp1 = cuda.to_cpu(smp1)
else:
smp1 = self.cpu_dist.sample(
sample_shape=(100000,)+self.sample_shape).data
smp2 = self.scipy_dist.rvs(
size=(100000,)+self.sample_shape+self.shape, **self.scipy_params)
testing.assert_allclose(numpy.median(smp1, axis=0),
numpy.median(smp2, axis=0),
atol=3e-2, rtol=3e-2)
def test_sample_cpu(self):
self.check_sample(False)
@attr.gpu
def test_sample_gpu(self):
self.check_sample(True)
def check_variance(self, is_gpu):
with testing.assert_warns(RuntimeWarning):
if is_gpu:
variance1 = self.gpu_dist.variance.data
else:
variance1 = self.cpu_dist.variance.data
if self.scipy_onebyone:
variance2 = []
for one_params in self.scipy_onebyone_params_iter():
variance2.append(self.scipy_dist.var(**one_params))
variance2 = numpy.vstack(variance2).reshape(
self.shape + self.cpu_dist.event_shape)
else:
variance2 = self.scipy_dist.var(**self.scipy_params)
array.assert_allclose(variance1, variance2)
def test_variance_cpu(self):
self.check_variance(False)
@attr.gpu
def test_variance_gpu(self):
self.check_variance(True)
testing.run_module(__name__, __file__)
| 3,681
| 31.298246
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_chisquare.py
|
import numpy
from chainer import distributions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestChisquare(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Chisquare
self.scipy_dist = stats.chi2
self.test_targets = set([
'batch_shape', 'entropy', 'event_shape', 'log_prob', 'mean',
'sample', 'support', 'variance'])
k = numpy.random.randint(1, 10, self.shape).astype(numpy.float32)
self.params = {'k': k}
self.scipy_params = {'df': k}
self.support = 'positive'
def sample_for_test(self):
smp = numpy.random.chisquare(
df=1, size=self.sample_shape + self.shape
).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| 1,045
| 24.512195
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_laplace.py
|
import unittest
import numpy
from chainer.backends import cuda
from chainer import distributions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestLaplace(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Laplace
self.scipy_dist = stats.laplace
self.test_targets = set([
'batch_shape', 'cdf', 'entropy', 'event_shape', 'icdf', 'log_prob',
'mean', 'prob', 'sample', 'stddev', 'support', 'variance'])
loc = utils.force_array(
numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32))
scale = utils.force_array(numpy.exp(
numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32))
self.params = {'loc': loc, 'scale': scale}
self.scipy_params = {'loc': loc, 'scale': scale}
def sample_for_test(self):
smp = numpy.random.normal(
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
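# Direct checks for the internal _laplace_cdf / _laplace_icdf helpers:
# gradients are verified with gradient_check, and the ICDF test also verifies
# that it inverts the CDF.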
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'dtype': [numpy.float32, numpy.float64],
}))
class TestLaplaceCDF(unittest.TestCase):
def setUp(self):
self.x = numpy.random.normal(size=self.shape).astype(self.dtype)
self.gy = numpy.random.normal(size=self.shape).astype(self.dtype)
self.backward_options = {'atol': 1e-2, 'rtol': 1e-2}
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
distributions.laplace._laplace_cdf,
x_data, y_grad, **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'dtype': [numpy.float32, numpy.float64],
}))
class TestLaplaceICDF(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(
low=0.02, high=0.98, size=self.shape).astype(self.dtype)
self.gy = numpy.random.normal(size=self.shape).astype(self.dtype)
self.backward_options = {'atol': 1e-2, 'rtol': 1e-2}
def check_forward(self, x_data):
y = distributions.laplace._laplace_icdf(x_data)
cdf = distributions.laplace._laplace_cdf(y)
testing.assert_allclose(cdf.array, x_data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
distributions.laplace._laplace_icdf,
x_data, y_grad, **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| 3,310
| 29.657407
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_poisson.py
|
import numpy
from chainer import distributions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestPoisson(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Poisson
self.scipy_dist = stats.poisson
self.test_targets = set([
'batch_shape', 'event_shape', 'log_prob', 'mean', 'sample',
'support', 'variance'])
lam = numpy.random.uniform(0.1, 10, self.shape).astype(numpy.float32)
self.params = {'lam': lam}
self.scipy_params = {'mu': lam}
self.continuous = False
self.support = 'non negative integer'
def sample_for_test(self):
smp = numpy.random.randint(
0, 10, self.sample_shape + self.shape).astype(numpy.int32)
return smp
testing.run_module(__name__, __file__)
| 1,072
| 25.170732
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_beta.py
|
import numpy
from chainer import distributions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestBeta(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Beta
self.scipy_dist = stats.beta
self.test_targets = set([
'batch_shape', 'entropy', 'event_shape', 'log_prob', 'mean',
'sample', 'support', 'variance'])
a = numpy.random.uniform(0, 10, self.shape).astype(numpy.float32)
b = numpy.random.uniform(0, 10, self.shape).astype(numpy.float32)
self.params = {'a': a, 'b': b}
self.scipy_params = {'a': a, 'b': b}
self.support = '[0, 1]'
def sample_for_test(self):
smp = numpy.random.uniform(
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| 1,105
| 25.97561
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_bernoulli.py
|
import unittest
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import distributions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 2), (1,)],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
'extreme_values': [True, False],
'binary_check': [True, False],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestBernoulli(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Bernoulli
self.scipy_dist = stats.bernoulli
self.options = {'binary_check': self.binary_check}
self.test_targets = set([
'batch_shape', 'entropy', 'log_prob', 'mean', 'prob', 'sample',
'stddev', 'support', 'variance'])
if self.extreme_values:
p = numpy.random.randint(0, 2, self.shape).astype(numpy.float32)
else:
p = numpy.random.uniform(0, 1, self.shape).astype(numpy.float32)
self.params = {'p': p}
self.scipy_params = {'p': p}
self.support = '{0, 1}'
self.continuous = False
self.old_settings = None
if self.extreme_values:
self.old_settings = numpy.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
if self.old_settings is not None:
numpy.seterr(**self.old_settings)
def sample_for_test(self):
smp = numpy.random.randint(
2, size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
def sample_for_binary_check_test(self):
smp = numpy.random.uniform(
low=0.1, high=0.9,
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
def check_log_prob_binary_check(self, is_gpu):
smp = self.sample_for_binary_check_test()
if is_gpu:
log_prob = self.gpu_dist.log_prob(cuda.to_gpu(smp)).data
else:
log_prob = self.cpu_dist.log_prob(smp).data
xp = backend.get_array_module(log_prob)
if self.binary_check:
self.assertTrue(xp.all(log_prob == -xp.inf))
else:
self.assertTrue(xp.all(xp.isfinite(log_prob)))
def test_log_prob_binary_check_cpu(self):
self.check_log_prob_binary_check(False)
@attr.gpu
def test_log_prob_binary_check_gpu(self):
self.check_log_prob_binary_check(True)
def check_prob_binary_check(self, is_gpu):
smp = self.sample_for_binary_check_test()
if is_gpu:
prob = self.gpu_dist.prob(cuda.to_gpu(smp)).data
else:
prob = self.cpu_dist.prob(smp).data
xp = backend.get_array_module(prob)
if self.binary_check:
self.assertTrue(xp.all(prob == 0))
else:
self.assertTrue(xp.all(prob > 0))
def test_prob_binary_check_cpu(self):
self.check_prob_binary_check(False)
@attr.gpu
def test_prob_binary_check_gpu(self):
self.check_prob_binary_check(True)
@testing.parameterize(*testing.product({
'logit_shape,x_shape': [
[(2, 3), (2, 3)],
[(), ()],
[(), (3,)]
],
'dtype': [numpy.float32, numpy.float64],
}))
class TestBernoulliLogProb(unittest.TestCase):
def setUp(self):
self.logit = numpy.random.normal(
size=self.logit_shape).astype(self.dtype)
self.x = numpy.random.randint(
0, 2, size=self.x_shape).astype(self.dtype)
self.gy = numpy.random.normal(size=self.x_shape).astype(self.dtype)
self.ggx = numpy.random.normal(
size=self.logit_shape).astype(self.dtype)
self.backward_options = {'atol': 1e-2, 'rtol': 1e-2}
def check_forward(self, logit_data, x_data):
distributions.bernoulli._bernoulli_log_prob(logit_data, x_data)
def test_forward_cpu(self):
self.check_forward(self.logit, self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.logit), cuda.to_gpu(self.x))
def check_backward(self, logit_data, x_data, y_grad):
def f(logit):
return distributions.bernoulli._bernoulli_log_prob(
logit, x_data)
gradient_check.check_backward(
f, logit_data, y_grad, **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.logit, self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.logit), cuda.to_gpu(self.x),
cuda.to_gpu(self.gy))
def check_double_backward(self, logit_data, x_data, y_grad, x_grad_grad):
def f(logit):
return distributions.bernoulli._bernoulli_log_prob(
logit, x_data)
gradient_check.check_double_backward(
f, logit_data, y_grad, x_grad_grad, dtype=numpy.float64,
**self.backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.logit, self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.logit), cuda.to_gpu(self.x),
cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
def test_backward_where_logit_has_infinite_values(self):
self.logit[...] = numpy.inf
with numpy.errstate(invalid='ignore'):
log_prob = distributions.bernoulli._bernoulli_log_prob(
self.logit, self.x)
        # Just confirm that the backward method runs without raising an error.
log_prob.backward()
testing.run_module(__name__, __file__)
| 5,780
| 31.477528
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_gamma.py
|
import numpy
from chainer import distributions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestGamma(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Gamma
self.scipy_dist = stats.gamma
self.test_targets = set(
['batch_shape', 'entropy', 'event_shape', 'log_prob', 'mean',
'sample', 'support', 'variance'])
k = utils.force_array(
numpy.random.uniform(0, 5, self.shape).astype(numpy.float32))
theta = utils.force_array(
numpy.random.uniform(0, 5, self.shape).astype(numpy.float32))
self.params = {'k': k, 'theta': theta}
self.scipy_params = {'a': k, 'scale': theta}
self.support = 'positive'
def sample_for_test(self):
smp = numpy.random.gamma(
shape=5., size=self.sample_shape + self.shape
).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| 1,236
| 26.488889
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_dirichlet.py
|
import numpy
from chainer import distributions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestDirichlet(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Dirichlet
self.scipy_dist = stats.dirichlet
self.test_targets = set([
'batch_shape', 'entropy', 'event_shape', 'mean', 'sample',
'support', 'variance'])
alpha = numpy.random.uniform(
0, 10, self.shape + (3,)).astype(numpy.float32)
self.params = {'alpha': alpha}
self.scipy_params = {'alpha': alpha}
self.support = '[0, 1]'
self.event_shape = (3,)
def sample_for_test(self):
smp = numpy.random.normal(size=self.shape + (3,)).astype(numpy.float32)
smp = numpy.exp(smp)
smp /= numpy.expand_dims(smp.sum(axis=-1), axis=-1)
return smp
testing.run_module(__name__, __file__)
| 1,151
| 26.428571
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_exponential.py
|
import numpy
from chainer import distributions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestExponential(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Exponential
self.scipy_dist = stats.expon
self.test_targets = set([
'batch_shape', 'cdf', 'entropy', 'event_shape', 'icdf', 'log_prob',
'mean', 'sample', 'support', 'variance'])
lam = numpy.exp(numpy.random.uniform(
-1, 1, self.shape)).astype(numpy.float32)
lam = numpy.asarray(lam)
self.params = {'lam': lam}
self.scipy_params = {'scale': 1 / lam}
self.support = 'positive'
def sample_for_test(self):
smp = numpy.exp(numpy.random.normal(
size=self.sample_shape + self.shape)).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| 1,128
| 25.880952
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_pareto.py
|
import numpy
from chainer import cuda
from chainer import distributions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestPareto(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Pareto
self.scipy_dist = stats.pareto
self.test_targets = set([
'batch_shape', 'entropy', 'event_shape', 'log_prob',
'mean', 'support', 'variance'])
scale = numpy.exp(numpy.random.uniform(
-1, 1, self.shape)).astype(numpy.float32)
alpha = numpy.exp(numpy.random.uniform(
-1, 1, self.shape)).astype(numpy.float32)
scale, alpha = numpy.asarray(scale), numpy.asarray(alpha)
self.params = {'scale': scale, 'alpha': alpha}
self.scipy_params = {'scale': scale, 'b': alpha}
self.support = '[scale, inf]'
def sample_for_test(self):
smp = numpy.random.pareto(
a=1, size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
def check_sample(self, is_gpu):
if is_gpu:
smp1 = self.gpu_dist.sample(
sample_shape=(100000,)+self.sample_shape).data
smp1 = cuda.to_cpu(smp1)
else:
smp1 = self.cpu_dist.sample(
sample_shape=(100000,)+self.sample_shape).data
smp2 = self.scipy_dist.rvs(
size=(100000,)+self.sample_shape+self.shape, **self.scipy_params)
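        # The Pareto mean is infinite for alpha <= 1 (possible here), so the
        # medians of the two sample sets are compared rather than the means.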
testing.assert_allclose(numpy.median(smp1, axis=0),
numpy.median(smp2, axis=0),
atol=3e-2, rtol=3e-2)
def test_sample_cpu(self):
self.check_sample(False)
@testing.attr.gpu
def test_sample_gpu(self):
self.check_sample(True)
testing.run_module(__name__, __file__)
| 2,040
| 29.924242
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_kldivergence.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import distributions
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 2), (1,)],
'is_variable': [True, False],
}))
@testing.fix_random()
class TestKLDivergence(unittest.TestCase):
def check_kl(self, dist1, dist2):
kl = chainer.kl_divergence(dist1, dist2).data
if isinstance(kl, cuda.ndarray):
kl = kl.get()
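        # Monte Carlo estimate of KL(dist1 || dist2): average log p1(x) - log p2(x)
        # over samples drawn from dist1, then compare with the analytic value.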
sample = dist1.sample(300000)
mc_kl = dist1.log_prob(sample).data - dist2.log_prob(sample).data
if isinstance(mc_kl, cuda.ndarray):
mc_kl = mc_kl.get()
mc_kl = numpy.nanmean(mc_kl, axis=0)
testing.assert_allclose(kl, mc_kl, atol=1e-2, rtol=1e-2)
def encode_params(self, params, is_gpu=False):
if is_gpu:
params = {k: cuda.to_gpu(v) for k, v in params.items()}
if self.is_variable:
params = {k: chainer.Variable(v) for k, v in params.items()}
return params
def make_bernoulli_dist(self, is_gpu=False):
p = numpy.random.uniform(0, 1, self.shape).astype(numpy.float32)
params = self.encode_params({'p': p}, is_gpu)
return distributions.Bernoulli(**params)
def make_beta_dist(self, is_gpu=False):
a = numpy.random.uniform(0.5, 10, self.shape).astype(numpy.float32)
b = numpy.random.uniform(0.5, 10, self.shape).astype(numpy.float32)
params = self.encode_params({'a': a, 'b': b}, is_gpu)
return distributions.Beta(**params)
def make_categorical_dist(self, is_gpu=False):
p = numpy.random.normal(size=self.shape+(3,)).astype(numpy.float32)
p = numpy.exp(p)
p /= numpy.expand_dims(p.sum(axis=-1), axis=-1)
params = self.encode_params({'p': p}, is_gpu)
return distributions.Categorical(**params)
def make_dirichlet_dist(self, is_gpu=False):
alpha = numpy.random.uniform(
0.5, 10, self.shape + (3,)).astype(numpy.float32)
params = self.encode_params({'alpha': alpha}, is_gpu)
return distributions.Dirichlet(**params)
def make_exponential_dist(self, is_gpu=False):
lam = numpy.exp(
numpy.random.uniform(0, 0.5, self.shape)).astype(numpy.float32)
params = self.encode_params({'lam': lam}, is_gpu)
return distributions.Exponential(**params)
def make_gamma_dist(self, is_gpu=False):
k = numpy.random.uniform(1, 5, self.shape).astype(numpy.float32)
theta = numpy.random.uniform(0, 2, self.shape).astype(numpy.float32)
params = self.encode_params({'k': k, 'theta': theta}, is_gpu)
return distributions.Gamma(**params)
def make_geometric_dist(self, is_gpu=False):
p = numpy.random.uniform(0, 1, self.shape).astype(numpy.float32)
params = self.encode_params({'p': p}, is_gpu)
return distributions.Geometric(**params)
def make_gumbel_dist(self, is_gpu=False):
loc = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
scale = numpy.exp(
numpy.random.uniform(0, 1, self.shape)).astype(numpy.float32)
params = self.encode_params({'loc': loc, 'scale': scale}, is_gpu)
return distributions.Gumbel(**params)
def make_laplace_dist(self, is_gpu=False):
loc = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
scale = numpy.exp(
numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32)
params = self.encode_params({'loc': loc, 'scale': scale}, is_gpu)
return distributions.Laplace(**params)
def make_log_normal_dist(self, is_gpu=False):
mu = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
sigma = numpy.exp(
numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32)
params = self.encode_params({'mu': mu, 'sigma': sigma}, is_gpu)
return distributions.LogNormal(**params)
def make_normal_dist(self, is_gpu=False, use_log_scale=False):
loc = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
if use_log_scale:
log_scale = numpy.random.uniform(
-1, 1, self.shape).astype(numpy.float32)
params = self.encode_params(
{'loc': loc, 'log_scale': log_scale}, is_gpu)
else:
scale = numpy.exp(
numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32)
params = self.encode_params({'loc': loc, 'scale': scale}, is_gpu)
return distributions.Normal(**params)
def make_multivariatenormal_dist(self, is_gpu=False):
loc = numpy.random.uniform(
-1, 1, self.shape + (3,)).astype(numpy.float32)
cov = numpy.random.normal(size=(numpy.prod(self.shape),) + (3, 3))
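        # cov_ @ cov_.T is symmetric positive (semi-)definite, so the Cholesky
        # factorization below succeeds and yields a valid scale_tril.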
cov = [cov_.dot(cov_.T) for cov_ in cov]
cov = numpy.vstack(cov).reshape(self.shape + (3, 3))
scale_tril = numpy.linalg.cholesky(cov).astype(numpy.float32)
params = self.encode_params(
{'loc': loc, 'scale_tril': scale_tril}, is_gpu)
return distributions.MultivariateNormal(**params)
def make_one_hot_categorical_dist(self, is_gpu=False):
p = numpy.random.normal(size=self.shape+(3,)).astype(numpy.float32)
p = numpy.exp(p)
p /= numpy.expand_dims(p.sum(axis=-1), axis=-1)
params = self.encode_params({'p': p}, is_gpu)
return distributions.OneHotCategorical(**params)
def make_pareto_dist(self, is_gpu=False):
scale = numpy.exp(numpy.random.uniform(
0.5, 1, self.shape)).astype(numpy.float32)
alpha = numpy.exp(numpy.random.uniform(
1, 2, self.shape)).astype(numpy.float32)
params = self.encode_params({'scale': scale, 'alpha': alpha}, is_gpu)
return distributions.Pareto(**params)
def make_poisson_dist(self, is_gpu=False):
lam = numpy.random.uniform(5, 10, self.shape).astype(numpy.float32)
params = self.encode_params({'lam': lam}, is_gpu)
return distributions.Poisson(**params)
def make_uniform_dist(self, is_gpu=False, low=None, high=None,
loc=None, scale=None, use_loc_scale=False):
if use_loc_scale:
if loc is None:
loc = numpy.random.uniform(
-3, 0, self.shape).astype(numpy.float32)
if scale is None:
scale = numpy.random.uniform(
1, 5, self.shape).astype(numpy.float32)
params = self.encode_params({'loc': loc, 'scale': scale}, is_gpu)
else:
if low is None:
low = numpy.random.uniform(
-3, 0, self.shape).astype(numpy.float32)
if high is None:
high = numpy.random.uniform(
low + 1, low + 6, self.shape).astype(numpy.float32)
params = self.encode_params({'low': low, 'high': high}, is_gpu)
return distributions.Uniform(**params)
def test_bernoulli_bernoulli_cpu(self):
dist1 = self.make_bernoulli_dist()
dist2 = self.make_bernoulli_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_bernoulli_bernoulli_gpu(self):
dist1 = self.make_bernoulli_dist(True)
dist2 = self.make_bernoulli_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_beta_beta_cpu(self):
dist1 = self.make_beta_dist()
dist2 = self.make_beta_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_beta_beta_gpu(self):
dist1 = self.make_beta_dist(True)
dist2 = self.make_beta_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('numpy>=1.11')
def test_categorical_categorical_cpu(self):
dist1 = self.make_categorical_dist()
dist2 = self.make_categorical_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_categorical_categorical_gpu(self):
dist1 = self.make_categorical_dist(True)
dist2 = self.make_categorical_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_dirichlet_dirichlet_cpu(self):
dist1 = self.make_dirichlet_dist()
dist2 = self.make_dirichlet_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_dirichlet_dirichlet_gpu(self):
dist1 = self.make_dirichlet_dist(True)
dist2 = self.make_dirichlet_dist(True)
self.check_kl(dist1, dist2)
def test_exponential_exponential_cpu(self):
dist1 = self.make_exponential_dist()
dist2 = self.make_exponential_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_exponential_exponential_gpu(self):
dist1 = self.make_exponential_dist(True)
dist2 = self.make_exponential_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_gamma_gamma_cpu(self):
dist1 = self.make_gamma_dist()
dist2 = self.make_gamma_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_gamma_gamma_gpu(self):
dist1 = self.make_gamma_dist(True)
dist2 = self.make_gamma_dist(True)
self.check_kl(dist1, dist2)
def test_geometric_geometric_cpu(self):
dist1 = self.make_geometric_dist()
dist2 = self.make_geometric_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_geometric_geometric_gpu(self):
dist1 = self.make_geometric_dist(True)
dist2 = self.make_geometric_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_gumbel_gumbel_cpu(self):
dist1 = self.make_gumbel_dist()
dist2 = self.make_gumbel_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_gumbel_gumbel_gpu(self):
dist1 = self.make_gumbel_dist(True)
dist2 = self.make_gumbel_dist(True)
self.check_kl(dist1, dist2)
def test_laplace_laplace_cpu(self):
dist1 = self.make_laplace_dist()
dist2 = self.make_laplace_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_laplace_laplace_gpu(self):
dist1 = self.make_laplace_dist(True)
dist2 = self.make_laplace_dist(True)
self.check_kl(dist1, dist2)
def test_log_normal_log_normal_cpu(self):
dist1 = self.make_log_normal_dist()
dist2 = self.make_log_normal_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_log_normal_log_normal_gpu(self):
dist1 = self.make_log_normal_dist(True)
dist2 = self.make_log_normal_dist(True)
self.check_kl(dist1, dist2)
def test_normal_normal_cpu(self):
for use_log_scale1 in [True, False]:
for use_log_scale2 in [True, False]:
dist1 = self.make_normal_dist(use_log_scale=use_log_scale1)
dist2 = self.make_normal_dist(use_log_scale=use_log_scale2)
self.check_kl(dist1, dist2)
@attr.gpu
def test_normal_normal_gpu(self):
for use_log_scale1 in [True, False]:
for use_log_scale2 in [True, False]:
dist1 = self.make_normal_dist(
True, use_log_scale=use_log_scale1)
dist2 = self.make_normal_dist(
True, use_log_scale=use_log_scale2)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_multivariatenormal_multivariatenormal_cpu(self):
dist1 = self.make_multivariatenormal_dist()
dist2 = self.make_multivariatenormal_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_multivariatenormal_multivariatenormal_gpu(self):
dist1 = self.make_multivariatenormal_dist(True)
dist2 = self.make_multivariatenormal_dist(True)
self.check_kl(dist1, dist2)
def test_one_hot_categorical_one_hot_categorical_cpu(self):
dist1 = self.make_one_hot_categorical_dist()
dist2 = self.make_one_hot_categorical_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_one_hot_categorical_one_hot_categorical_gpu(self):
dist1 = self.make_one_hot_categorical_dist(True)
dist2 = self.make_one_hot_categorical_dist(True)
self.check_kl(dist1, dist2)
def test_pareto_pareto_cpu(self):
dist1 = self.make_pareto_dist()
dist2 = self.make_pareto_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_pareto_pareto_gpu(self):
dist1 = self.make_pareto_dist(True)
dist2 = self.make_pareto_dist(True)
self.check_kl(dist1, dist2)
@testing.with_requires('scipy')
def test_poisson_poisson_cpu(self):
dist1 = self.make_poisson_dist()
dist2 = self.make_poisson_dist()
self.check_kl(dist1, dist2)
@attr.gpu
def test_poisson_poisson_gpu(self):
dist1 = self.make_poisson_dist(True)
dist2 = self.make_poisson_dist(True)
self.check_kl(dist1, dist2)
def test_uniform_uniform_cpu(self):
for use_loc_scale1 in [True, False]:
for use_loc_scale2 in [True, False]:
dist1 = self.make_uniform_dist(use_loc_scale=use_loc_scale1)
dist2 = self.make_uniform_dist(use_loc_scale=use_loc_scale2)
self.check_kl(dist1, dist2)
@attr.gpu
def test_uniform_uniform_gpu(self):
for use_loc_scale1 in [True, False]:
for use_loc_scale2 in [True, False]:
dist1 = self.make_uniform_dist(
True, use_loc_scale=use_loc_scale1)
dist2 = self.make_uniform_dist(
True, use_loc_scale=use_loc_scale2)
self.check_kl(dist1, dist2)
testing.run_module(__name__, __file__)
| 13,849
| 37.049451
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_uniform.py
|
import numpy
from chainer import distributions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
'use_loc_scale': [True, False],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestUniform(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Uniform
self.scipy_dist = stats.uniform
self.test_targets = set([
'batch_shape', 'cdf', 'entropy', 'event_shape', 'icdf', 'log_prob',
'mean', 'sample', 'stddev', 'support', 'variance'])
if self.use_loc_scale:
loc = numpy.random.uniform(
-10, 0, self.shape).astype(numpy.float32)
scale = numpy.random.uniform(
0, 10, self.shape).astype(numpy.float32)
self.params = {'loc': loc, 'scale': scale}
self.scipy_params = {'loc': loc, 'scale': scale}
else:
low = numpy.random.uniform(
-10, 0, self.shape).astype(numpy.float32)
high = numpy.random.uniform(
low, low + 10, self.shape).astype(numpy.float32)
self.params = {'low': low, 'high': high}
self.scipy_params = {'loc': low, 'scale': high-low}
self.support = '[low, high]'
def sample_for_test(self):
smp = numpy.random.normal(
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| 1,626
| 30.288462
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_gumbel.py
|
import numpy
from chainer import distributions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestGumbel(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Gumbel
self.scipy_dist = stats.gumbel_r
self.test_targets = set([
'batch_shape', 'entropy', 'event_shape', 'log_prob', 'mean',
'sample', 'support', 'variance'])
loc = utils.force_array(
numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32))
scale = utils.force_array(numpy.exp(
numpy.random.uniform(-1, 1, self.shape)).astype(numpy.float32))
self.params = {'loc': loc, 'scale': scale}
self.scipy_params = {'loc': loc, 'scale': scale}
def sample_for_test(self):
smp = numpy.random.normal(
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| 1,210
| 27.833333
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_log_normal.py
|
import numpy
from chainer import distributions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestLogNormal(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.LogNormal
self.scipy_dist = stats.lognorm
self.test_targets = set([
'batch_shape', 'entropy', 'event_shape', 'log_prob', 'mean',
'sample', 'support', 'variance'])
mu = utils.force_array(
numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32))
sigma = utils.force_array(numpy.exp(numpy.random.uniform(
-1, 0, self.shape)).astype(numpy.float32))
self.params = {'mu': mu, 'sigma': sigma}
self.scipy_params = {'s': sigma, 'scale': numpy.exp(mu)}
self.support = 'positive'
def sample_for_test(self):
smp = numpy.random.lognormal(
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| 1,258
| 27.613636
| 74
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_multivariate_normal.py
|
import unittest
import numpy
from chainer import cuda
from chainer import distributions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 2), (1,)],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
'event_shape': [(3,)],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestMultivariateNormal(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.MultivariateNormal
self.scipy_dist = stats.multivariate_normal
self.scipy_onebyone = True
d, = self.event_shape
self.test_targets = set([
'batch_shape', 'entropy', 'event_shape', 'log_prob',
'support'])
loc = numpy.random.uniform(
-1, 1, self.shape + self.event_shape).astype(numpy.float32)
cov = numpy.random.normal(
size=(numpy.prod(self.shape),) + (d, d))
cov = [cov_.dot(cov_.T) for cov_ in cov]
cov = numpy.vstack(cov).reshape(self.shape + (d, d))
scale_tril = numpy.linalg.cholesky(cov).astype(numpy.float32)
self.params = {'loc': loc, 'scale_tril': scale_tril}
self.scipy_params = {'mean': loc, 'cov': cov}
def test_value_error(self):
with self.assertRaises(ValueError):
self.dist(loc=self.params['loc'])
def sample_for_test(self):
smp = numpy.random.normal(
size=self.sample_shape + self.shape + self.event_shape
).astype(numpy.float32)
return smp
@testing.parameterize(*testing.product({
'd': [3, 1, 0],
'lower': [True, False],
'dtype': [numpy.float32],
}))
@testing.with_requires('scipy')
class TestTriangularInv(unittest.TestCase):
def setUp(self):
while True:
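            # Resample until every diagonal entry is away from zero so that the
            # triangular matrix is safely invertible.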
self.x = numpy.random.normal(
0, 10, size=(self.d, self.d)).astype(self.dtype)
self.x = numpy.tril(self.x)
if (abs(self.x.diagonal()) > 0.1).all():
break
if not self.lower:
self.x = self.x.T
self.gy = numpy.random.normal(size=(self.d, self.d)).astype(self.dtype)
self.ggy = numpy.random.normal(
size=(self.d, self.d)).astype(self.dtype)
self.backward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.double_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
def check_forward(self, x_data):
y = distributions.multivariate_normal._triangular_inv(
x_data, lower=self.lower)
inv_x = numpy.linalg.inv(cuda.to_cpu(x_data))
testing.assert_allclose(y.array, inv_x)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
def f(x):
return distributions.multivariate_normal._triangular_inv(
x, lower=self.lower)
gradient_check.check_backward(
f, x_data, y_grad, **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad):
def f(x):
return distributions.multivariate_normal._triangular_inv(
x, lower=self.lower)
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad,
**self.double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggy)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggy))
@testing.parameterize(*testing.product({
'd': [3, 5],
'lower': [True, False],
'dtype': [numpy.float32],
}))
@testing.without_requires('scipy')
class TestTriangularInvExceptions(unittest.TestCase):
def setUp(self):
self.x = numpy.random.normal(
0, 10, size=(self.d, self.d)).astype(self.dtype)
self.x = numpy.tril(self.x)
if not self.lower:
self.x = self.x.T
def check_forward(self, x_data):
with self.assertRaises(ImportError):
distributions.multivariate_normal._triangular_inv(
x_data, lower=self.lower)
def test_forward_cpu(self):
self.check_forward(self.x)
testing.run_module(__name__, __file__)
| 4,670
| 30.77551
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_geometric.py
|
import numpy
from chainer import distributions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestGeometric(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Geometric
self.scipy_dist = stats.geom
self.test_targets = set([
'batch_shape', 'event_shape', 'log_prob', 'mean', 'sample',
'support', 'variance'])
p = numpy.random.uniform(0, 1, self.shape).astype(numpy.float32)
self.params = {'p': p}
self.scipy_params = {'p': p}
self.support = 'positive integer'
self.continuous = False
def sample_for_test(self):
smp = numpy.random.randint(
1, 10, self.sample_shape + self.shape).astype(numpy.int32)
return smp
testing.run_module(__name__, __file__)
| 1,055
| 24.756098
| 72
|
py
|
chainer
|
chainer-master/tests/chainer_tests/distributions_tests/test_utils.py
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import distributions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'dtype': [numpy.float32, numpy.float64],
}))
class TestModifiedXLogX(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(
0.1, 1, size=self.shape).astype(self.dtype)
self.zero_x = numpy.zeros(shape=self.shape).astype(self.dtype)
self.gy = numpy.random.normal(size=self.shape).astype(self.dtype)
self.ggx = numpy.random.normal(size=self.shape).astype(self.dtype)
self.backward_options = {'atol': 1e-2, 'rtol': 1e-2, 'eps': 1e-5}
def check_forward(self, x_data):
distributions.utils._modified_xlogx(x_data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
distributions.utils._modified_xlogx,
x_data, y_grad, dtype=numpy.float64, **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad):
gradient_check.check_double_backward(
distributions.utils._modified_xlogx, x_data, y_grad,
x_grad_grad, dtype=numpy.float64, **self.backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
def check_backward_zero_input(self, x_data):
x = chainer.Variable(x_data)
y = distributions.utils._modified_xlogx(x)
if numpy.prod(y.shape) > 1:
y = chainer.functions.sum(y)
with testing.assert_warns(RuntimeWarning):
y.backward()
def test_backward_zero_input_cpu(self):
self.check_backward_zero_input(self.zero_x)
@attr.gpu
def test_backward_zero_input_gpu(self):
self.check_backward_zero_input(cuda.to_gpu(self.zero_x))
testing.run_module(__name__, __file__)
| 2,540
| 31.164557
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/iterators_tests/test_iterator_compatibility.py
|
from __future__ import division
import itertools
import unittest
from chainer import iterators
from chainer import serializers
from chainer import testing
@testing.parameterize(*testing.product({
'n_prefetch': [1, 2],
'shared_mem': [None, 1000000],
}))
class TestIteratorCompatibility(unittest.TestCase):
def setUp(self):
self.n_processes = 2
self.options = {'n_processes': self.n_processes,
'n_prefetch': self.n_prefetch,
'shared_mem': self.shared_mem}
    def test_iterator_compatibility(self):
dataset = [1, 2, 3, 4, 5, 6]
iters = (
lambda: iterators.SerialIterator(dataset, 2),
lambda: iterators.MultiprocessIterator(dataset, 2, **self.options),
)
for it_before, it_after in itertools.permutations(iters, 2):
it = it_before()
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 / 6)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
target = dict()
it.serialize(serializers.DictionarySerializer(target))
it = it_after()
it.serialize(serializers.NpzDeserializer(target))
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, 6 / 6)
testing.run_module(__name__, __file__)
| 2,074
| 31.936508
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/iterators_tests/test_multiprocess_iterator.py
|
from __future__ import division
import copy
import errno
import os
import pickle
import platform
import signal
import subprocess
import sys
import tempfile
import threading
import time
import unittest
import numpy
import six
from chainer import iterators
from chainer import serializers
from chainer import testing
from chainer.testing import attr
class BaseTestMultiprocessIterator(object):
def setUp(self):
self.n_processes = 2
self.options = {'n_processes': self.n_processes,
'n_prefetch': self.n_prefetch,
'shared_mem': self.shared_mem,
'maxtasksperchild': self.maxtasksperchild}
if self.order_sampler is not None:
self.options.update(
{'order_sampler': self.order_sampler})
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_iterator_repeat(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 4 / 6)
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_iterator_list_type(self):
dataset = [[i, numpy.zeros((10,)) + i] for i in range(6)]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batches = {}
for j in range(3):
batch = it.next()
self.assertEqual(len(batch), 2)
if j != 2:
self.assertFalse(it.is_new_epoch)
else:
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(
it.epoch_detail, (3 * i + j + 1) * 2 / 6)
self.assertAlmostEqual(
it.previous_epoch_detail, (3 * i + j) * 2 / 6)
for x in batch:
self.assertIsInstance(x, list)
self.assertIsInstance(x[1], numpy.ndarray)
batches[x[0]] = x[1]
self.assertEqual(len(batches), len(dataset))
for k, v in six.iteritems(batches):
numpy.testing.assert_allclose(dataset[k][1], v)
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_iterator_tuple_type(self):
dataset = [(i, numpy.zeros((10,)) + i) for i in range(6)]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batches = {}
for j in range(3):
batch = it.next()
self.assertEqual(len(batch), 2)
if j != 2:
self.assertFalse(it.is_new_epoch)
else:
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(
it.epoch_detail, (3 * i + j + 1) * 2 / 6)
self.assertAlmostEqual(
it.previous_epoch_detail, (3 * i + j) * 2 / 6)
for x in batch:
self.assertIsInstance(x, tuple)
self.assertIsInstance(x[1], numpy.ndarray)
batches[x[0]] = x[1]
self.assertEqual(len(batches), len(dataset))
for k, v in six.iteritems(batches):
numpy.testing.assert_allclose(dataset[k][1], v)
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_iterator_dict_type(self):
dataset = [{i: numpy.zeros((10,)) + i} for i in range(6)]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batches = {}
for j in range(3):
batch = it.next()
self.assertEqual(len(batch), 2)
if j != 2:
self.assertFalse(it.is_new_epoch)
else:
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(
it.epoch_detail, (3 * i + j + 1) * 2 / 6)
self.assertAlmostEqual(
it.previous_epoch_detail, (3 * i + j) * 2 / 6)
for x in batch:
self.assertIsInstance(x, dict)
k = tuple(x)[0]
v = x[k]
self.assertIsInstance(v, numpy.ndarray)
batches[k] = v
self.assertEqual(len(batches), len(dataset))
for k, v in six.iteritems(batches):
x = dataset[k][tuple(dataset[k])[0]]
numpy.testing.assert_allclose(x, v)
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_iterator_repeat_not_even(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
batches = sum([it.next() for _ in range(5)], [])
self.assertEqual(sorted(batches), sorted(dataset * 2))
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_iterator_not_repeat(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_iterator_not_repeat_not_even(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
self.assertAlmostEqual(it.epoch_detail, 0 / 5)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertAlmostEqual(it.epoch_detail, 2 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 5)
batch2 = it.next()
self.assertAlmostEqual(it.epoch_detail, 4 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 5)
batch3 = it.next()
self.assertAlmostEqual(it.epoch_detail, 5 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 5)
self.assertRaises(StopIteration, it.next)
self.assertEqual(len(batch3), 1)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_iterator_shuffle_divisible(self):
dataset = list(range(10))
it = iterators.MultiprocessIterator(
dataset, 10, **self.options)
self.assertNotEqual(it.next(), it.next())
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_iterator_shuffle_nondivisible(self):
dataset = list(range(10))
it = iterators.MultiprocessIterator(
dataset, 3, **self.options)
out = sum([it.next() for _ in range(7)], [])
self.assertNotEqual(out[0:10], out[10:20])
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_copy_not_repeat(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
copy_it = copy.copy(it)
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
it = None
batches = sum([copy_it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, copy_it.next)
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_reset(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
for trial in range(4):
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
it.reset()
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_reset_middle(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
for trial in range(4):
it.next()
it.reset()
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
it.reset()
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_reset_repeat(self):
dataset = [1, 2, 3, 4]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=True, **self.options)
for trial in range(4):
batches = sum([it.next() for _ in range(4)], [])
self.assertEqual(sorted(batches), sorted(2 * dataset))
it.reset()
@unittest.skipIf(platform.system() == 'Windows' and
int(platform.python_version_tuple()[0]) < 3,
'causes timeout in conda with Windows')
def test_unsupported_reset_finalized(self):
dataset = [1, 2, 3, 4]
it = iterators.MultiprocessIterator(
dataset, 2, repeat=False, **self.options)
it.next()
it.next()
it.finalize()
self.assertRaises(NotImplementedError, it.reset)
@testing.parameterize(*testing.product({
'n_prefetch': [1, 2],
'shared_mem': [None, 1000000],
'order_sampler': [
None, lambda order, _: numpy.random.permutation(len(order))],
'maxtasksperchild': [None],
}))
class TestMultiprocessIterator(
BaseTestMultiprocessIterator, unittest.TestCase):
pass
@testing.parameterize(*testing.product({
'n_prefetch': [1, 2],
'shared_mem': [None, 1000000],
'order_sampler': [
None, lambda order, _: numpy.random.permutation(len(order))],
'maxtasksperchild': [1, 10],
}))
@attr.slow
class TestMultiprocessIteratorSlow(
BaseTestMultiprocessIterator, unittest.TestCase):
pass
# Pickle does not allow lambdas or locally defined functions to be
# serialized with the iterator, so the order samplers below are
# wrapped in classes instead of anonymous functions.
class PickleSampler(object):
def __call__(self, order, _):
return numpy.random.permutation(len(order))
@testing.parameterize(*testing.product({
'n_prefetch': [1, 2],
'shared_mem': [None, 1000000],
'order_sampler': [None, PickleSampler()]
}))
class TestMultiprocessIteratorPickle(unittest.TestCase):
def setUp(self):
self.n_processes = 2
self.options = {'n_processes': self.n_processes,
'n_prefetch': self.n_prefetch,
'shared_mem': self.shared_mem}
if self.order_sampler is not None:
self.options.update(
{'shuffle': None, 'order_sampler': self.order_sampler})
def test_iterator_pickle_new(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 / 6)
self.assertIsNone(it.previous_epoch_detail)
pickled_it = pickle.dumps(it)
it = pickle.loads(pickled_it)
def test_iterator_pickle_after_init(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 / 6)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
pickled_it = pickle.dumps(it)
it = pickle.loads(pickled_it)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
@testing.parameterize(*testing.product({
'n_prefetch': [1, 2],
'shared_mem': [None, 1000000],
'order_sampler': [
None, lambda order, _: numpy.random.permutation(len(order))],
}))
class TestMultiprocessIteratorSerialize(unittest.TestCase):
def setUp(self):
self.n_processes = 2
self.options = {'n_processes': self.n_processes,
'n_prefetch': self.n_prefetch,
'shared_mem': self.shared_mem}
if self.order_sampler is not None:
self.options.update(
{'shuffle': None, 'order_sampler': self.order_sampler})
def test_iterator_serialize(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 / 6)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
target = dict()
it.serialize(serializers.DictionarySerializer(target))
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
it.serialize(serializers.NpzDeserializer(target))
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
def test_iterator_serialize_backward_compat(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 / 6)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
target = dict()
it.serialize(serializers.DictionarySerializer(target))
        # Older versions do not have previous_epoch_detail.
del target['previous_epoch_detail']
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
it.serialize(serializers.NpzDeserializer(target))
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
@testing.parameterize(*testing.product({
'n_prefetch': [1, 2],
'shared_mem': [None, 1000000],
}))
class TestMultiprocessIteratorOrderSamplerEpochSize(unittest.TestCase):
def setUp(self):
def order_sampler(order, cur_pos):
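            # Returns [0, 0, 1, 1, 2, 2]: each of the 3 items appears twice,
            # so one epoch consists of 6 samples.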
return numpy.repeat(numpy.arange(3), 2)
self.n_processes = 2
self.options = {'n_processes': self.n_processes,
'n_prefetch': self.n_prefetch,
'shared_mem': self.shared_mem,
'shuffle': None,
'order_sampler': order_sampler}
def test_iterator_repeat(self):
dataset = [1, 2, 3]
it = iterators.MultiprocessIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 4 / 6)
self.assertEqual(
sorted(batch1 + batch2 + batch3), [1, 1, 2, 2, 3, 3])
class _NoSameIndicesOrderSampler(object):
def __init__(self, batchsize):
self.n_call = 0
def __call__(self, current_order, current_pos):
# all batches contain unique indices
remaining = current_order[current_pos:]
first = numpy.setdiff1d(numpy.arange(len(current_order)), remaining)
second = numpy.setdiff1d(numpy.arange(len(current_order)), first)
return numpy.concatenate((first, second))
class TestNoSameIndicesOrderSampler(unittest.TestCase):
def test_no_same_indices_order_sampler(self):
dataset = [1, 2, 3, 4, 5, 6]
batchsize = 5
it = iterators.MultiprocessIterator(
dataset, batchsize,
order_sampler=_NoSameIndicesOrderSampler(batchsize))
for _ in range(5):
batch = it.next()
self.assertEqual(len(numpy.unique(batch)), batchsize)
class _InvalidOrderSampler(object):
def __init__(self):
self.n_call = 0
def __call__(self, _order, _):
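        # Returns an order that shrinks on every call; the iterator should
        # reject it with ValueError.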
order = numpy.arange(len(_order) - self.n_call)
self.n_call += 1
return order
class TestMultiprocessIteratorInvalidOrderSampler(unittest.TestCase):
def test_invalid_order_sampler(self):
dataset = [1, 2, 3, 4, 5, 6]
with self.assertRaises(ValueError):
it = iterators.MultiprocessIterator(
dataset, 6, shuffle=None,
order_sampler=_InvalidOrderSampler())
it.next()
class TestMultiprocessIteratorConcurrency(unittest.TestCase):
def test_finalize_not_deadlock(self):
dataset = numpy.ones((1000, 1000))
it = iterators.MultiprocessIterator(dataset, 10, n_processes=4)
for _ in range(10):
it.next()
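        # Run finalize() in a daemon thread; if it deadlocks, join(5) times out
        # and the thread remains alive.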
t = threading.Thread(target=lambda: it.finalize())
t.daemon = True
t.start()
t.join(5)
deadlock = t.is_alive()
self.assertFalse(deadlock)
class TestMultiprocessIteratorDeterminism(unittest.TestCase):
def setUp(self):
self._seed = 3141592653
self._random_bak = numpy.random.get_state()
def tearDown(self):
numpy.random.set_state(self._random_bak)
def test_reproduce_same_permutation(self):
dataset = [1, 2, 3, 4, 5, 6]
order_sampler1 = iterators.ShuffleOrderSampler(
numpy.random.RandomState(self._seed))
it1 = iterators.MultiprocessIterator(
dataset, 6, order_sampler=order_sampler1)
order_sampler2 = iterators.ShuffleOrderSampler(
numpy.random.RandomState(self._seed))
it2 = iterators.MultiprocessIterator(
dataset, 6, order_sampler=order_sampler2)
for _ in range(5):
self.assertEqual(it1.next(), it2.next())
@testing.parameterize(*testing.product({
'n_prefetch': [1, 2],
'shared_mem': [None, 1000000],
'order_sampler': [
None, lambda order, _: numpy.random.permutation(len(order))],
}))
class TestMultiprocessIteratorInterruption(unittest.TestCase):
# unless you're debugging tests, this should be false
show_interruption_msg = False
def setUp(self):
self.code_path = None
if not self.show_interruption_msg:
self.nullfd = os.open(os.devnull, os.O_WRONLY)
def tearDown(self):
if not self.show_interruption_msg:
os.close(self.nullfd)
if self.code_path is not None:
os.remove(self.code_path)
def run_code(self, dataset, n_processes, operation):
code_template = """
import os
import random
import sys
import time
from chainer import iterators
# Using `multiprocessing` on Windows Python 2.7 requires
# that the script can be found on `sys.path`.
# See https://bugs.python.org/issue19946
sys.path.append(os.path.dirname(__file__))
class InfiniteWaitDataSet(object):
def __len__(self):
return 1000000
def __getitem__(self, _):
time.sleep(1000000)
infinite_wait = InfiniteWaitDataSet()
class NoWaitDataSet(object):
def __len__(self):
return 1000000
def __getitem__(self, _):
return 0
no_wait = NoWaitDataSet()
if __name__ == '__main__':
if {shared_mem} is not None and {dataset} is infinite_wait:
iterators.MultiprocessIterator._interruption_testing = True
it = iterators.MultiprocessIterator({dataset}, 100,
shuffle={shuffle},
n_processes={n_processes},
n_prefetch={n_prefetch},
shared_mem={shared_mem},
order_sampler={order_sampler})
{operation}
"""
code = code_template.format(dataset=dataset,
shuffle=None,
n_processes=n_processes,
n_prefetch=self.n_prefetch,
shared_mem=self.shared_mem,
order_sampler=self.order_sampler,
operation=operation)
fd, self.code_path = tempfile.mkstemp(suffix='.py')
os.write(fd, six.b(code))
os.close(fd)
if self.shared_mem is not None and dataset == 'infinite_wait':
stdout = subprocess.PIPE
else:
stdout = None
stderr = None if self.show_interruption_msg else self.nullfd
self.p = subprocess.Popen([sys.executable, self.code_path],
stdout=stdout, stderr=stderr)
if stdout is None:
self.child_pids = []
else:
self.child_pids = list(map(int, self.p.stdout.readline().split()))
def send_sigint(self):
# `signal.CTRL_C_EVENT` is also sent to the test process itself.
# See https://docs.python.org/3.6/library/os.html#os.kill
        # So we need to wait for the signal and ignore it.
        # We can NOT ignore the signal by modifying the signal handler here.
        # If we temporarily ignored the signal, it would be sent again
        # when the signal handler is restored.
        # If we ignored the signal permanently, we could not interrupt the test.
if os.name == 'nt':
try:
os.kill(self.p.pid, signal.CTRL_C_EVENT)
while True:
pass
except KeyboardInterrupt:
pass
else:
os.kill(self.p.pid, signal.SIGINT)
def killall(self):
        # Try waiting for the root process.
        # Python 2.7 doesn't have `subprocess.TimeoutExpired`,
        # so we cannot use `p.wait(10)`.
for _ in range(10):
time.sleep(1)
if self.p.poll() is not None:
self.p.wait()
break
pids = [self.p.pid] + self.child_pids
was_alive = False
for pid in pids:
try:
if os.name == 'nt':
os.kill(pid, signal.SIGTERM)
else:
os.kill(pid, signal.SIGKILL)
except OSError as e:
# no such pid (unix)
if e.errno == errno.ESRCH:
pass
# process terminated but its handle remains (Windows)
elif e.errno == errno.EACCES:
pass
# process terminated and its handle erased (Windows)
elif e.errno == errno.EINVAL:
pass
else:
raise
            else:  # the process existed and was successfully killed
was_alive = True
return was_alive
@unittest.skip
def test_interrupt_infinite_wait_batch(self):
# TODO(niboshi): See: https://github.com/chainer/chainer/issues/3383
self.run_code(dataset='infinite_wait',
n_processes=2,
operation='it.next()')
time.sleep(1.5)
self.send_sigint()
self.assertFalse(self.killall())
@unittest.skip
def test_interrupt_no_wait_batch(self):
# TODO(niboshi): See: https://github.com/chainer/chainer/issues/3383
self.run_code(dataset='no_wait',
n_processes=2,
operation='time.sleep(1000)')
time.sleep(1.5)
self.send_sigint()
self.assertFalse(self.killall())
class StallingDataset(object):
def __init__(self, nth, sleep):
self.data = [0, 1, 2, 3, 4]
self.nth = nth
self.sleep = sleep
def __len__(self):
return len(self.data)
def __getitem__(self, i):
if i == self.nth:
time.sleep(self.sleep)
return self.data[i]
@testing.parameterize(*testing.product({
    'nth': [0, 1, 2],  # index of the item whose fetch will stall
}))
class TestMultiprocessIteratorStalledDatasetDetection(unittest.TestCase):
def test_stalled_getitem(self):
nth = self.nth
batch_size = 2
sleep = 0.5
timeout = 0.1
dataset = StallingDataset(nth, sleep)
it = iterators.MultiprocessIterator(
dataset, batch_size=batch_size, shuffle=False,
dataset_timeout=timeout, repeat=False)
# TimeoutWarning should be issued.
warning_cls = iterators.MultiprocessIterator.TimeoutWarning
data = []
# No warning until the stalling batch
for i in range(nth // batch_size):
data.append(it.next())
# Warning on the stalling batch
with testing.assert_warns(warning_cls):
data.append(it.next())
# Retrieve data until the end
while True:
try:
data.append(it.next())
except StopIteration:
break
# All data must be retrieved
assert data == [
dataset.data[i * batch_size: (i+1) * batch_size]
for i in range((len(dataset) + batch_size - 1) // batch_size)]
testing.run_module(__name__, __file__)
| 32,393
| 36.755245
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/iterators_tests/test_serial_iterator.py
|
from __future__ import division
import unittest
import numpy
from chainer import iterators
from chainer import serializers
from chainer import testing
class TestSerialIterator(unittest.TestCase):
def test_iterator_repeat(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.SerialIterator(dataset, 2, shuffle=False)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
self.assertEqual(it.next(), [1, 2])
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 0 / 6)
self.assertEqual(it.next(), [3, 4])
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 2 / 6)
self.assertEqual(it.next(), [5, 6])
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 4 / 6)
def test_iterator_repeat_not_even(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.SerialIterator(dataset, 2, shuffle=False)
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 / 5)
self.assertIsNone(it.previous_epoch_detail)
self.assertEqual(it.next(), [1, 2])
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 5)
self.assertEqual(it.next(), [3, 4])
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 5)
self.assertEqual(it.next(), [5, 1])
self.assertTrue(it.is_new_epoch)
self.assertEqual(it.epoch, 1)
self.assertAlmostEqual(it.epoch_detail, 6 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 5)
self.assertEqual(it.next(), [2, 3])
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 8 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 6 / 5)
self.assertEqual(it.next(), [4, 5])
self.assertTrue(it.is_new_epoch)
self.assertEqual(it.epoch, 2)
self.assertAlmostEqual(it.epoch_detail, 10 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 8 / 5)
def test_iterator_not_repeat(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.SerialIterator(dataset, 2, repeat=False, shuffle=False)
self.assertAlmostEqual(it.epoch_detail, 0 / 6)
self.assertIsNone(it.previous_epoch_detail)
self.assertEqual(it.next(), [1, 2])
self.assertAlmostEqual(it.epoch_detail, 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
self.assertEqual(it.next(), [3, 4])
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
self.assertEqual(it.next(), [5, 6])
self.assertTrue(it.is_new_epoch)
self.assertEqual(it.epoch, 1)
self.assertAlmostEqual(it.epoch_detail, 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
for i in range(2):
self.assertRaises(StopIteration, it.next)
def test_iterator_not_repeat_not_even(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.SerialIterator(dataset, 2, repeat=False, shuffle=False)
self.assertAlmostEqual(it.epoch_detail, 0 / 5)
self.assertIsNone(it.previous_epoch_detail)
self.assertEqual(it.next(), [1, 2])
self.assertAlmostEqual(it.epoch_detail, 2 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 5)
self.assertEqual(it.next(), [3, 4])
self.assertAlmostEqual(it.epoch_detail, 4 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 5)
self.assertEqual(it.next(), [5])
self.assertTrue(it.is_new_epoch)
self.assertEqual(it.epoch, 1)
self.assertAlmostEqual(it.epoch_detail, 5 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 5)
self.assertRaises(StopIteration, it.next)
@testing.parameterize(
{'order_sampler': None, 'shuffle': True},
{'order_sampler': lambda order, _: numpy.random.permutation(len(order)),
'shuffle': None}
)
class TestSerialIteratorShuffled(unittest.TestCase):
def test_iterator_repeat(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.SerialIterator(dataset, 2, shuffle=self.shuffle,
order_sampler=self.order_sampler)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 4 / 6)
def test_iterator_repeat_not_even(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.SerialIterator(dataset, 2, shuffle=self.shuffle,
order_sampler=self.order_sampler)
batches = sum([it.next() for _ in range(5)], [])
self.assertEqual(sorted(batches[:5]), dataset)
self.assertEqual(sorted(batches[5:]), dataset)
def test_iterator_not_repeat(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.SerialIterator(dataset, 2, repeat=False,
shuffle=self.shuffle,
order_sampler=self.order_sampler)
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
def test_iterator_not_repeat_not_even(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.SerialIterator(dataset, 2, repeat=False,
shuffle=self.shuffle,
order_sampler=self.order_sampler)
self.assertAlmostEqual(it.epoch_detail, 0 / 5)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertAlmostEqual(it.epoch_detail, 2 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 5)
batch2 = it.next()
self.assertAlmostEqual(it.epoch_detail, 4 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 5)
batch3 = it.next()
self.assertAlmostEqual(it.epoch_detail, 5 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 5)
self.assertRaises(StopIteration, it.next)
self.assertEqual(len(batch3), 1)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
def test_iterator_shuffle_divisible(self):
dataset = list(range(10))
it = iterators.SerialIterator(dataset, 10, shuffle=self.shuffle,
order_sampler=self.order_sampler)
self.assertNotEqual(it.next(), it.next())
def test_iterator_shuffle_nondivisible(self):
dataset = list(range(10))
it = iterators.SerialIterator(dataset, 3)
out = sum([it.next() for _ in range(7)], [])
self.assertNotEqual(out[0:10], out[10:20])
def test_reset(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.SerialIterator(dataset, 2, repeat=False,
shuffle=self.shuffle,
order_sampler=self.order_sampler)
for trial in range(4):
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
it.reset()
@testing.parameterize(
{'order_sampler': None, 'shuffle': True},
{'order_sampler': lambda order, _: numpy.random.permutation(len(order)),
'shuffle': None}
)
class TestSerialIteratorSerialize(unittest.TestCase):
def test_iterator_serialize(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.SerialIterator(dataset, 2, shuffle=self.shuffle,
order_sampler=self.order_sampler)
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 / 6)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
target = dict()
it.serialize(serializers.DictionarySerializer(target))
it = iterators.SerialIterator(dataset, 2)
it.serialize(serializers.NpzDeserializer(target))
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
def test_iterator_serialize_backward_compat(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.SerialIterator(dataset, 2, shuffle=self.shuffle,
order_sampler=self.order_sampler)
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 / 6)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
target = dict()
it.serialize(serializers.DictionarySerializer(target))
        # Older versions used '_order' instead of 'order'.
target['_order'] = target['order']
del target['order']
        # Older versions did not store 'previous_epoch_detail'.
del target['previous_epoch_detail']
it = iterators.SerialIterator(dataset, 2)
it.serialize(serializers.NpzDeserializer(target))
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
class TestSerialIteratorOrderSamplerEpochSize(unittest.TestCase):
def setUp(self):
def order_sampler(order, cur_pos):
return numpy.repeat(numpy.arange(3), 2)
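        # The sampler above always returns [0, 0, 1, 1, 2, 2], so one epoch
        # spans six samples even though the dataset has only three items.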
self.options = {'order_sampler': order_sampler}
def test_iterator_repeat(self):
dataset = [1, 2, 3]
it = iterators.SerialIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 4 / 6)
self.assertEqual(
sorted(batch1 + batch2 + batch3), [1, 1, 2, 2, 3, 3])
class NoSameIndicesOrderSampler(object):
def __init__(self, batchsize):
self.n_call = 0
def __call__(self, current_order, current_pos):
# all batches contain unique indices
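        # A batch that crosses an epoch boundary is built from the tail of the
        # previous order plus the head of the new one, so placing the already
        # consumed indices (``first``) at the head keeps such a batch free of
        # duplicates.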
remaining = current_order[current_pos:]
first = numpy.setdiff1d(numpy.arange(len(current_order)), remaining)
second = numpy.setdiff1d(numpy.arange(len(current_order)), first)
return numpy.concatenate((first, second))
class TestNoSameIndicesOrderSampler(unittest.TestCase):
def test_no_same_indices_order_sampler(self):
dataset = [1, 2, 3, 4, 5, 6]
batchsize = 5
it = iterators.SerialIterator(
dataset, batchsize,
order_sampler=NoSameIndicesOrderSampler(batchsize))
for _ in range(5):
batch = it.next()
self.assertEqual(len(numpy.unique(batch)), batchsize)
class InvalidOrderSampler(object):
def __init__(self):
self.n_call = 0
def __call__(self, _order, _):
order = numpy.arange(len(_order) - self.n_call)
self.n_call += 1
return order
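# The sampler above returns an order that shrinks by one element on every
# call; the iterator is expected to reject an order whose length changes,
# which the test below asserts via ValueError.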
class TestSerialIteratorInvalidOrderSampler(unittest.TestCase):
def test_invalid_order_sampler(self):
dataset = [1, 2, 3, 4, 5, 6]
with self.assertRaises(ValueError):
it = iterators.SerialIterator(
dataset, 6, order_sampler=InvalidOrderSampler())
it.next()
testing.run_module(__name__, __file__)
| 15,833
| 39.70437
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/iterators_tests/test_multithread_iterator.py
|
from __future__ import division
import copy
import unittest
import numpy
import six
from chainer import iterators
from chainer import serializers
from chainer import testing
@testing.parameterize(*testing.product({
'n_threads': [1, 2],
'order_sampler': [
None, lambda order, _: numpy.random.permutation(len(order))]
}))
class TestMultithreadIterator(unittest.TestCase):
def setUp(self):
self.options = {'n_threads': self.n_threads,
'order_sampler': self.order_sampler}
def test_iterator_repeat(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.MultithreadIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 4 / 6)
def test_iterator_list_type(self):
dataset = [[i, numpy.zeros((10,)) + i] for i in range(6)]
it = iterators.MultithreadIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batches = {}
for j in range(3):
batch = it.next()
self.assertEqual(len(batch), 2)
if j != 2:
self.assertFalse(it.is_new_epoch)
else:
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(
it.epoch_detail, (3 * i + j + 1) * 2 / 6)
self.assertAlmostEqual(
it.previous_epoch_detail, (3 * i + j) * 2 / 6)
for x in batch:
self.assertIsInstance(x, list)
self.assertIsInstance(x[1], numpy.ndarray)
batches[x[0]] = x[1]
self.assertEqual(len(batches), len(dataset))
for k, v in six.iteritems(batches):
numpy.testing.assert_allclose(dataset[k][1], v)
def test_iterator_tuple_type(self):
dataset = [(i, numpy.zeros((10,)) + i) for i in range(6)]
it = iterators.MultithreadIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batches = {}
for j in range(3):
batch = it.next()
self.assertEqual(len(batch), 2)
if j != 2:
self.assertFalse(it.is_new_epoch)
else:
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(
it.epoch_detail, (3 * i + j + 1) * 2 / 6)
self.assertAlmostEqual(
it.previous_epoch_detail, (3 * i + j) * 2 / 6)
for x in batch:
self.assertIsInstance(x, tuple)
self.assertIsInstance(x[1], numpy.ndarray)
batches[x[0]] = x[1]
self.assertEqual(len(batches), len(dataset))
for k, v in six.iteritems(batches):
numpy.testing.assert_allclose(dataset[k][1], v)
def test_iterator_dict_type(self):
dataset = [{i: numpy.zeros((10,)) + i} for i in range(6)]
it = iterators.MultithreadIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batches = {}
for j in range(3):
batch = it.next()
self.assertEqual(len(batch), 2)
if j != 2:
self.assertFalse(it.is_new_epoch)
else:
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(
it.epoch_detail, (3 * i + j + 1) * 2 / 6)
self.assertAlmostEqual(
it.previous_epoch_detail, (3 * i + j) * 2 / 6)
for x in batch:
self.assertIsInstance(x, dict)
k = tuple(x)[0]
v = x[k]
self.assertIsInstance(v, numpy.ndarray)
batches[k] = v
self.assertEqual(len(batches), len(dataset))
for k, v in six.iteritems(batches):
x = dataset[k][tuple(dataset[k])[0]]
numpy.testing.assert_allclose(x, v)
def test_iterator_repeat_not_even(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultithreadIterator(dataset, 2, **self.options)
batches = sum([it.next() for _ in range(5)], [])
self.assertEqual(sorted(batches), sorted(dataset * 2))
def test_iterator_not_repeat(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultithreadIterator(
dataset, 2, repeat=False, **self.options)
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
def test_iterator_not_repeat_not_even(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultithreadIterator(
dataset, 2, repeat=False, **self.options)
self.assertAlmostEqual(it.epoch_detail, 0 / 5)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertAlmostEqual(it.epoch_detail, 2 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 5)
batch2 = it.next()
self.assertAlmostEqual(it.epoch_detail, 4 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 5)
batch3 = it.next()
self.assertAlmostEqual(it.epoch_detail, 5 / 5)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 5)
self.assertRaises(StopIteration, it.next)
self.assertEqual(len(batch3), 1)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
def test_iterator_shuffle_divisible(self):
dataset = list(range(10))
it = iterators.MultithreadIterator(
dataset, 10, **self.options)
self.assertNotEqual(it.next(), it.next())
def test_iterator_shuffle_nondivisible(self):
dataset = list(range(10))
it = iterators.MultithreadIterator(
dataset, 3, **self.options)
out = sum([it.next() for _ in range(7)], [])
self.assertNotEqual(out[0:10], out[10:20])
def test_copy_not_repeat(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultithreadIterator(
dataset, 2, repeat=False, **self.options)
copy_it = copy.copy(it)
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
it = None
batches = sum([copy_it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, copy_it.next)
def test_reset(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultithreadIterator(
dataset, 2, repeat=False, **self.options)
for trial in range(4):
batches = sum([it.next() for _ in range(3)], [])
self.assertEqual(sorted(batches), dataset)
for _ in range(2):
self.assertRaises(StopIteration, it.next)
it.reset()
def test_supported_reset_middle(self):
dataset = [1, 2, 3, 4, 5]
it = iterators.MultithreadIterator(
dataset, 2, repeat=False, **self.options)
it.next()
it.reset()
def test_supported_reset_repeat(self):
dataset = [1, 2, 3, 4]
it = iterators.MultithreadIterator(
dataset, 2, repeat=True, **self.options)
it.next()
it.next()
it.reset()
def test_supported_reset_finalized(self):
dataset = [1, 2, 3, 4]
it = iterators.MultithreadIterator(
dataset, 2, repeat=False, **self.options)
it.next()
it.next()
it.finalize()
it.reset()
@testing.parameterize(*testing.product({
'n_threads': [1, 2],
'order_sampler': [
None, lambda order, _: numpy.random.permutation(len(order))]
}))
class TestMultithreadIteratorSerialize(unittest.TestCase):
def setUp(self):
self.options = {'n_threads': self.n_threads,
'order_sampler': self.order_sampler}
def test_iterator_serialize(self):
dataset = [1, 2, 3, 4, 5, 6]
it = iterators.MultithreadIterator(dataset, 2, **self.options)
self.assertEqual(it.epoch, 0)
self.assertAlmostEqual(it.epoch_detail, 0 / 6)
self.assertIsNone(it.previous_epoch_detail)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
target = dict()
it.serialize(serializers.DictionarySerializer(target))
it = iterators.MultithreadIterator(dataset, 2, **self.options)
it.serialize(serializers.NpzDeserializer(target))
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertEqual(sorted(batch1 + batch2 + batch3), dataset)
self.assertAlmostEqual(it.epoch_detail, 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, 4 / 6)
class TestMultithreadIteratorOrderSamplerEpochSize(unittest.TestCase):
def setUp(self):
def order_sampler(order, cur_pos):
return numpy.repeat(numpy.arange(3), 2)
self.options = {'order_sampler': order_sampler}
def test_iterator_repeat(self):
dataset = [1, 2, 3]
it = iterators.MultithreadIterator(dataset, 2, **self.options)
for i in range(3):
self.assertEqual(it.epoch, i)
self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)
if i == 0:
self.assertIsNone(it.previous_epoch_detail)
else:
self.assertAlmostEqual(it.previous_epoch_detail, i - 2 / 6)
batch1 = it.next()
self.assertEqual(len(batch1), 2)
self.assertIsInstance(batch1, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 2 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 0 / 6)
batch2 = it.next()
self.assertEqual(len(batch2), 2)
self.assertIsInstance(batch2, list)
self.assertFalse(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 4 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 2 / 6)
batch3 = it.next()
self.assertEqual(len(batch3), 2)
self.assertIsInstance(batch3, list)
self.assertTrue(it.is_new_epoch)
self.assertAlmostEqual(it.epoch_detail, i + 6 / 6)
self.assertAlmostEqual(it.previous_epoch_detail, i + 4 / 6)
self.assertEqual(
sorted(batch1 + batch2 + batch3), [1, 1, 2, 2, 3, 3])
class NoSameIndicesOrderSampler(object):
def __init__(self, batchsize):
self.n_call = 0
def __call__(self, current_order, current_pos):
# all batches contain unique indices
remaining = current_order[current_pos:]
first = numpy.setdiff1d(numpy.arange(len(current_order)), remaining)
second = numpy.setdiff1d(numpy.arange(len(current_order)), first)
return numpy.concatenate((first, second))
class TestMultithreadIteratorNoSameIndicesOrderSampler(unittest.TestCase):
def test_no_same_indices_order_sampler(self):
dataset = [1, 2, 3, 4, 5, 6]
batchsize = 5
it = iterators.MultithreadIterator(
dataset, batchsize,
order_sampler=NoSameIndicesOrderSampler(batchsize))
for _ in range(5):
batch = it.next()
self.assertEqual(len(numpy.unique(batch)), batchsize)
class InvalidOrderSampler(object):
def __init__(self):
self.n_call = 0
def __call__(self, _order, _):
order = numpy.arange(len(_order) - self.n_call)
self.n_call += 1
return order
class TestMultithreadIteratorInvalidOrderSampler(unittest.TestCase):
def test_invalid_order_sampler(self):
dataset = [1, 2, 3, 4, 5, 6]
with self.assertRaises(ValueError):
it = iterators.MultithreadIterator(
dataset, 6, order_sampler=InvalidOrderSampler())
it.next()
testing.run_module(__name__, __file__)
| 14,934
| 37.002545
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/iterators_tests/__init__.py
| 0
| 0
| 0
|
py
|