python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
|---|---|---|
import numpy as np
import math
import librosa
def generate_waveforms(length, frequencies):
    """Synthesize a bank of windowed sine waves.

    One sine per entry in `frequencies` (cycles per sample), each multiplied
    by a Hann-style envelope spanning `length` samples; the result is laid
    out channel-last, shape (ceil(length), len(frequencies)).
    """
    num_samples = int(math.ceil(length))
    t = np.arange(num_samples, dtype=np.float32)

    # Hann envelope: map the sample index into [-1, 1], clamp, raised cosine.
    pos = np.clip(2 * t / length - 1, -1, 1)
    envelope = 0.5 * (1 + np.cos(pos * math.pi))

    # Outer product of time and angular frequency gives one column per tone.
    angular = np.array(frequencies) * (2 * math.pi)
    sines = np.sin(t[:, np.newaxis] * angular)
    return sines * envelope[:, np.newaxis]
def rosa_resample(input, in_rate, out_rate):
    """Resample a (samples, channels) array with librosa, channel by channel."""
    n_channels = input.shape[1]
    # Mono fast path: resample the single channel and restore the channel axis.
    if n_channels == 1:
        mono = librosa.resample(input[:, 0], orig_sr=in_rate, target_sr=out_rate)
        return mono[:, np.newaxis]
    # Resample each channel independently, then pack them back side by side.
    resampled = [
        librosa.resample(np.array(input[:, ch]), orig_sr=in_rate, target_sr=out_rate)
        for ch in range(n_channels)
    ]
    out = np.zeros(shape=[resampled[0].shape[0], len(resampled)], dtype=resampled[0].dtype)
    for ch, channel_data in enumerate(resampled):
        out[:, ch] = channel_data
    return out
|
DALI-main
|
dali/test/python/test_audio_decoder_utils.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import nvidia.dali.ops as ops
import nvidia.dali.pipeline as pipeline
import nvidia.dali.plugin.tf as dali_tf
import nvidia.dali.types as dali_types
from test_utils_tensorflow import skip_for_incompatible_tf
import os
from nose.tools import assert_equals
from nose_utils import raises
import itertools
import warnings
# Enable eager execution so DALIDataset can be iterated directly; the call
# may raise on TF versions where it is unavailable/no-op, hence the guard.
try:
    tf.compat.v1.enable_eager_execution()
except: # noqa: E722
    pass
# Keep TF from reserving all GPU memory up front so DALI can allocate too.
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)
# Test data locations; DALI_EXTRA_PATH must point at the DALI_extra checkout.
data_path = os.path.join(os.environ['DALI_EXTRA_PATH'], 'db/single/jpeg/')
file_list_path = os.path.join(data_path, 'image_list.txt')
def setup():
    # Module-level setup hook: skip the whole module on TF versions the
    # DALI TF plugin does not support.
    skip_for_incompatible_tf()
def dali_pipe_batch_1(shapes, types, as_single_tuple=False):
    """Check DALIDataset output-shape handling for batch_size=1.

    `shapes`/`types` are forwarded to DALIDataset; the dataset outputs are
    compared against a reference DALI pipeline run directly. When
    `as_single_tuple` is set, `shapes` arrives as a 1-element tuple wrapping
    the actual shape.
    """
    class TestPipeline(pipeline.Pipeline):
        def __init__(self, **kwargs):
            super(TestPipeline, self).__init__(**kwargs)
            self.reader = ops.readers.File(file_root=data_path, file_list=file_list_path)
            self.decoder = ops.decoders.Image(device='mixed')
        def define_graph(self):
            data, _ = self.reader()
            image = self.decoder(data)
            return image
    pipe = TestPipeline(batch_size=1, seed=0)
    ds = dali_tf.DALIDataset(pipe, batch_size=1, output_dtypes=types, output_shapes=shapes)
    # for clarity, we could have used the previous `pipe`
    pipe_ref = TestPipeline(batch_size=1, seed=0, device_id=0, num_threads=4)
    pipe_ref.build()
    ds_iter = iter(ds)
    # See if the iteration over different images works
    if as_single_tuple:
        # Unwrap so the arity checks below see the actual shape.
        shapes = shapes[0]
    for _ in range(10):
        image, = ds_iter.next()
        image_ref, = pipe_ref.run()
        if shapes is None or len(shapes) == 4:
            # 4-dim (or unspecified) shape: batch dimension of 1 is present.
            assert_equals(image.shape, ([1] + image_ref[0].shape()))
        else:
            # 3-dim shape: the unit batch dimension is squeezed away.
            assert_equals(image.shape, image_ref[0].shape())
def test_batch_1_different_shapes():
    """Batch-1 shape checks for a spread of fully/partially unknown shapes."""
    cases = [None, (None, None, None, None), (None, None, None),
             (1, None, None, None), (1, None, None, 3), (None, None, 3)]
    for case in cases:
        # Plain shape/dtype, then the same pair wrapped in 1-element tuples.
        yield dali_pipe_batch_1, case, tf.uint8
        yield dali_pipe_batch_1, (case,), (tf.uint8,), True
def test_batch_1_mixed_tuple():
    """A tuple-wrapped dtype with a bare shape (and vice versa) must fail."""
    shape_variants = [(None, None, None, None), (None, None, None), (1, None, None, None),
                      (1, None, None, 3), (None, None, 3)]
    expected_msg = ("Dimension value must be integer or None * got value * "
                    "with type '<class 'tuple'>'")
    for variant in shape_variants:
        # Bare shape + tuple dtype: structure lengths disagree.
        yield raises(ValueError, "The two structures don't have the same sequence length.")(
            dali_pipe_batch_1), variant, (tf.uint8,)
        # Tuple shape + bare dtype: tuple ends up where a dimension belongs.
        yield raises(TypeError, expected_msg)(dali_pipe_batch_1), (variant,), tf.uint8
def test_batch_1_wrong_shape():
    """Shapes incompatible with the real pipeline output must be rejected."""
    incompatible = [(2, None, None, None), (None, None, 4), (2, None, None, 4),
                    (None, 0, None, 3)]
    message = ("The shape provided for output `0` is not compatible with the "
               "shape returned by DALI Pipeline")
    for bad_shape in incompatible:
        yield raises(tf.errors.InvalidArgumentError, message)(
            dali_pipe_batch_1), bad_shape, tf.uint8
def dali_pipe_batch_N(shapes, types, batch):
    """Check DALIDataset shape handling for an arbitrary batch size.

    Images are resized to a fixed 200x200 so the full expected output shape
    is known up front.
    """
    class TestPipeline(pipeline.Pipeline):
        def __init__(self, **kwargs):
            super(TestPipeline, self).__init__(**kwargs)
            self.reader = ops.readers.File(file_root=data_path, file_list=file_list_path)
            self.decoder = ops.decoders.Image(device='mixed')
            self.resize = ops.Resize(device="gpu", resize_x=200, resize_y=200)
        def define_graph(self):
            data, _ = self.reader()
            image = self.decoder(data)
            resized = self.resize(image)
            return resized
    pipe = TestPipeline(batch_size=batch, seed=0)
    ds = dali_tf.DALIDataset(pipe, batch_size=batch, output_dtypes=types, output_shapes=shapes)
    ds_iter = iter(ds)
    for _ in range(10):
        image, = ds_iter.next()
        if shapes is None or len(shapes) == 4:
            # 4-dim (or unspecified) shape keeps the batch dimension.
            assert_equals(image.shape, (batch, 200, 200, 3))
        else:
            # 3-dim shape: batch dimension omitted (used with batch == 1 below).
            assert_equals(image.shape, (200, 200, 3))
def _shapes_with_none_subsets(output_shape):
    """Yield every variant of `output_shape` with each subset of dims replaced by None."""
    for mask in range(2 ** len(output_shape)):
        yield tuple(
            (dim if mask & (2 ** idx) else None)
            for idx, dim in enumerate(output_shape))


def test_batch_N_valid_shapes():
    """All compatible (partially-None) shapes should be accepted.

    The two masked-shape loops were duplicated inline; they now share the
    `_shapes_with_none_subsets` helper.
    """
    for batch in [1, 10]:
        # No shape
        yield dali_pipe_batch_N, None, tf.uint8, batch
        # Full shape, with every subset of dimensions hidden behind None
        for noned_shape in _shapes_with_none_subsets((batch, 200, 200, 3)):
            yield dali_pipe_batch_N, noned_shape, tf.uint8, batch
        # Omitted batch of size `1`
        for noned_shape in _shapes_with_none_subsets((200, 200, 3)):
            yield dali_pipe_batch_N, noned_shape, tf.uint8, 1
def dali_pipe_multiple_out(shapes, types, batch):
    """Shape handling for a pipeline with two outputs: image and label.

    `shapes` is a per-output tuple (or None); the label is expected as
    (batch, 1) unless a (batch,)-shaped request squeezes the trailing dim.
    """
    class TestPipeline(pipeline.Pipeline):
        def __init__(self, **kwargs):
            super(TestPipeline, self).__init__(**kwargs)
            self.reader = ops.readers.File(file_root=data_path, file_list=file_list_path)
            self.decoder = ops.decoders.Image(device='mixed')
            self.resize = ops.Resize(device="gpu", resize_x=200, resize_y=200)
        def define_graph(self):
            data, label = self.reader()
            image = self.decoder(data)
            resized = self.resize(image)
            # Label is moved to GPU alongside the image output.
            return resized, label.gpu()
    pipe = TestPipeline(batch_size=batch, seed=0)
    ds = dali_tf.DALIDataset(pipe, batch_size=batch, output_dtypes=types, output_shapes=shapes)
    ds_iter = iter(ds)
    for _ in range(10):
        image, label = ds_iter.next()
        if shapes is None or shapes[0] is None or len(shapes[0]) == 4:
            assert_equals(image.shape, (batch, 200, 200, 3))
        else:
            assert_equals(image.shape, (200, 200, 3))
        if shapes is None or shapes[1] is None or len(shapes[1]) == 2:
            assert_equals(label.shape, (batch, 1))
        else:
            # A (batch,)-shaped request squeezes the per-sample dimension.
            assert_equals(label.shape, (batch,))
def test_multiple_input_valid_shapes():
    """Accepted shape combinations for a two-output (image, label) dataset."""
    for batch in [1, 10]:
        variants = (
            None,
            (None, None),
            ((batch, 200, 200, 3), None),
            (None, (batch, 1)),
            (None, (batch,)),
        )
        for shape_pair in variants:
            yield dali_pipe_multiple_out, shape_pair, (tf.uint8, tf.int32), batch
def test_multiple_input_invalid():
    """Shape tuples whose arity does not match the two outputs must fail."""
    for batch in [1, 10]:
        for bad_shapes in [(None,), (batch, 200, 200, 3, None), (None, None, None)]:
            failing = raises(ValueError, "The two structures don't have the same sequence length.")(
                dali_pipe_multiple_out)
            yield failing, bad_shapes, (tf.uint8, tf.uint8), batch
def dali_pipe_artificial_shape(shapes, tf_type, dali_type, batch):
    """Shape matching against a constant (1, 2, 1)-shaped pipeline output.

    Each arity of `shapes` corresponds to a different expected output: unit
    dimensions are dropped as fewer dimensions are requested.
    """
    class TestPipeline(pipeline.Pipeline):
        def __init__(self, **kwargs):
            super(TestPipeline, self).__init__(**kwargs)
            self.constant = ops.Constant(dtype=dali_type, idata=[1, 1], shape=[1, 2, 1])
        def define_graph(self):
            return self.constant().gpu()
    pipe = TestPipeline(batch_size=batch, seed=0)
    ds = dali_tf.DALIDataset(pipe, batch_size=batch, output_dtypes=tf_type, output_shapes=shapes)
    ds_iter = iter(ds)
    for _ in range(10):
        out, = ds_iter.next()
        if len(shapes) == 4:
            # Full batch + sample shape.
            assert_equals(out.shape, (batch, 1, 2, 1))
        if len(shapes) == 3:
            # Trailing unit dimension dropped.
            assert_equals(out.shape, (batch, 1, 2))
        if len(shapes) == 2:
            # Both unit sample dimensions dropped.
            assert_equals(out.shape, (batch, 2,))
        if len(shapes) == 1:
            # Unit batch dimension dropped as well (batch == 1 case).
            assert_equals(out.shape, (2,))
def test_artificial_match():
    """Shapes that agree with the constant (1, 2, 1) output are accepted."""
    matching_variants = [
        (None, None, None, None),
        (None, None, 2),
    ]
    for batch in [1, 10]:
        for variant in matching_variants + [(batch, None, None, None), (batch, None, 2)]:
            yield dali_pipe_artificial_shape, variant, tf.uint8, dali_types.UINT8, batch
    # Fully specified shapes, with and without the batch dimension.
    yield dali_pipe_artificial_shape, (10, 2), tf.uint8, dali_types.UINT8, 10
    yield dali_pipe_artificial_shape, (2,), tf.uint8, dali_types.UINT8, 1
def test_artificial_no_match():
    """Shapes that contradict the constant output must raise."""
    batch = 10
    message = ("The shape provided for output `0` is not compatible with the "
               "shape returned by DALI Pipeline")
    mismatched_shapes = [(batch + 1, None, None, None), (None, None, 3), (batch, 2, 1, 1)]
    for mismatched in mismatched_shapes:
        yield raises(tf.errors.InvalidArgumentError, message)(
            dali_pipe_artificial_shape), mismatched, tf.uint8, dali_types.UINT8, batch
def dali_pipe_types(tf_type, dali_type):
    """Check that a pipeline output of `dali_type` surfaces as `tf_type`."""
    class TestPipeline(pipeline.Pipeline):
        def __init__(self, **kwargs):
            super(TestPipeline, self).__init__(**kwargs)
            # A tiny constant output is enough to exercise dtype conversion.
            self.constant = ops.Constant(dtype=dali_type, idata=[1, 1], shape=[2])
        def define_graph(self):
            return self.constant().gpu()
    pipe = TestPipeline(batch_size=1, seed=0)
    ds = dali_tf.DALIDataset(pipe, batch_size=1, output_dtypes=tf_type)
    ds_iter = iter(ds)
    out, = ds_iter.next()
    assert_equals(out.dtype, tf_type)
# float64 is not tested because ops.Constant doesn't support it
tf_type_list = [
    tf.uint8, tf.uint16, tf.uint32, tf.uint64,
    tf.int8, tf.int16, tf.int32, tf.int64,
    tf.bool,
    tf.float16, tf.float32
]
# DALI dtypes, index-aligned with tf_type_list above.
dali_type_list = [
    dali_types.UINT8, dali_types.UINT16, dali_types.UINT32, dali_types.UINT64,
    dali_types.INT8, dali_types.INT16, dali_types.INT32, dali_types.INT64,
    dali_types.BOOL,
    dali_types.FLOAT16, dali_types.FLOAT
]
# Aligned pairs are the valid combinations; every other (tf, dali) pair
# from the cross product must be rejected by DALIDataset.
matching_types = list(zip(tf_type_list, dali_type_list))
all_types = itertools.product(tf_type_list, dali_type_list)
not_matching_types = list(set(all_types).difference(set(matching_types)))
def test_type_returns():
    """Matching dtype pairs pass; every mismatched pair raises."""
    for tf_dtype, dali_dtype in matching_types:
        yield dali_pipe_types, tf_dtype, dali_dtype
    for tf_dtype, dali_dtype in not_matching_types:
        failing = raises(
            tf.errors.InvalidArgumentError,
            "The type provided for output `0` is not compatible with the type "
            "returned by DALI Pipeline")(dali_pipe_types)
        yield failing, tf_dtype, dali_dtype
def dali_pipe_deprecated(dataset_kwargs, shapes, tf_type, dali_type, batch,
                         expected_warnings_count):
    """Exercise deprecated DALIDataset kwargs (`shapes`/`dtypes`).

    Builds the dataset with `dataset_kwargs`, asserts that exactly
    `expected_warnings_count` warnings were emitted, then verifies the
    output shape and dtype.
    """
    class TestPipeline(pipeline.Pipeline):
        def __init__(self, **kwargs):
            super(TestPipeline, self).__init__(**kwargs)
            self.constant = ops.Constant(dtype=dali_type, idata=[1, 1], shape=[2])
        def define_graph(self):
            return self.constant().gpu()
    pipe = TestPipeline(batch_size=batch, seed=0)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        ds = dali_tf.DALIDataset(pipe, batch_size=batch, **dataset_kwargs)
        # One warning is expected per deprecated kwarg used.
        assert_equals(len(w), expected_warnings_count)
    ds_iter = iter(ds)
    for _ in range(10):
        out, = ds_iter.next()
        if isinstance(shapes, int) or len(shapes) == 1:
            # Scalar/one-element shape request: batch dimension omitted
            # (these cases are generated with batch == 1).
            assert_equals(out.shape, (2,))
        else:
            assert_equals(out.shape, (batch, 2))
        assert_equals(out.dtype, tf_type)
def test_deprecated():
    """Deprecated kwargs still work; each deprecated kwarg emits one warning."""
    # (dataset kwargs, expected shape, batch size, expected warning count)
    cases = [
        ({"shapes": 2, "dtypes": tf.uint8}, 2, 1, 2),
        ({"shapes": [4, 2], "dtypes": tf.uint8}, [4, 2], 4, 2),
        ({"shapes": [[4, 2]], "dtypes": [tf.uint8]}, [4, 2], 4, 2),
        ({"output_shapes": 2, "dtypes": tf.uint8}, 2, 1, 1),
        ({"output_shapes": (4, 2), "dtypes": tf.uint8}, [4, 2], 4, 1),
        ({"output_shapes": ((4, 2),), "dtypes": [tf.uint8]}, [4, 2], 4, 1),
        ({"shapes": 2, "output_dtypes": tf.uint8}, 2, 1, 1),
        ({"shapes": [4, 2], "output_dtypes": tf.uint8}, [4, 2], 4, 1),
        ({"shapes": [[4, 2]], "output_dtypes": (tf.uint8,)}, [4, 2], 4, 1),
    ]
    for dataset_kwargs, shape, batch, warning_count in cases:
        yield (dali_pipe_deprecated, dataset_kwargs, shape,
               tf.uint8, dali_types.UINT8, batch, warning_count)
def test_deprecated_double_def():
    """Passing a deprecated kwarg together with its replacement must raise."""
    template = ("Usage of `{}` is deprecated in favor of `output_{}`*only `output_{}` "
                "should be provided.")
    double_definitions = [
        ("shapes", {"shapes": 2, "output_shapes": 2, "dtypes": tf.uint8}),
        ("dtypes", {"shapes": 2, "dtypes": tf.uint8, "output_dtypes": tf.uint8}),
    ]
    for kwarg_name, dataset_kwargs in double_definitions:
        message = template.format(kwarg_name, kwarg_name, kwarg_name)
        yield (raises(ValueError, message)(dali_pipe_deprecated),
               dataset_kwargs, 2, tf.uint8, dali_types.UINT8, 1, 2)
def test_no_output_dtypes():
    """Omitting the dtypes entirely is a TypeError with a descriptive message."""
    expected_msg = ("`output_dtypes` should be provided as single tf.DType value or a tuple of "
                    "tf.DType values")
    failing = raises(TypeError, expected_msg)(dali_pipe_deprecated)
    yield failing, {"shapes": 2}, 2, tf.uint8, dali_types.UINT8, 1, 2
|
DALI-main
|
dali/test/python/test_dali_tf_dataset_shape.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali._utils.autoserialize import invoke_autoserialize
from nose_utils import raises
# Target path handed to invoke_autoserialize by every test below.
serialized_filename = "/tmp/some_custom_name"
def test_direct_import():
    """Autoserialize a decorated pipeline imported directly from the fixture module."""
    from autoserialize_test import decorated_function
    invoke_autoserialize(decorated_function, serialized_filename)
def test_indirect_import():
    """Autoserialize via a fixture module that itself imports the decorated function."""
    from autoserialize_test import imports_decorated_function
    invoke_autoserialize(imports_decorated_function, serialized_filename)
@raises(RuntimeError, glob="Precisely one autoserialize function must exist in the module.*")
def test_double_decorated_functions():
    """Two decorated functions in one module are ambiguous and must raise."""
    from autoserialize_test import double_decorated_functions
    invoke_autoserialize(double_decorated_functions, serialized_filename)
@raises(TypeError, glob="Only `@pipeline_def` can be decorated with `@triton.autoserialize`.")
def test_improper_decorated_function():
    """Decorating something that is not a @pipeline_def must raise TypeError."""
    from autoserialize_test import improper_decorated_function
    invoke_autoserialize(improper_decorated_function, serialized_filename)
def test_custom_module():
    """Autoserialize a fixture that uses a custom module internally (per its name)."""
    from autoserialize_test import custom_module_inside
    invoke_autoserialize(custom_module_inside, serialized_filename)
|
DALI-main
|
dali/test/python/test_triton_autoserialize.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import numpy as np
import tempfile
import scipy
import scipy.io.wavfile
import nvidia.dali.fn as fn
import nvidia.dali.math as dmath
from nvidia.dali.pipeline import pipeline_def
from test_audio_decoder_utils import generate_waveforms
def setup_test_nemo_asr_reader_cpu():
    """Create a temp dir with 1-, 2- and 4-channel wav files plus a NeMo ASR manifest.

    Returns (tmp_dir, manifest_path). Keep the returned TemporaryDirectory
    alive for as long as the files are needed — it cleans itself up when
    garbage-collected.

    Fix vs the original: `nemo_asr_manifest` was computed twice (identical
    value); it is now computed once, right before use.
    """
    tmp_dir = tempfile.TemporaryDirectory()
    dir_name = tmp_dir.name

    def create_manifest_file(manifest_file, names, lengths, rates, texts):
        # One JSON object per line, the format the NeMo ASR reader expects.
        assert len(names) == len(lengths) == len(rates) == len(texts)
        with open(manifest_file, 'w') as f:
            for name, length, rate, text in zip(names, lengths, rates, texts):
                entry = {
                    'audio_filepath': name,
                    # duration in seconds = samples * (1 / sample rate)
                    'duration': length * (1.0 / rate),
                    'text': text,
                }
                json.dump(entry, f)
                f.write('\n')

    names = [
        os.path.join(dir_name, "dali_test_1C.wav"),
        os.path.join(dir_name, "dali_test_2C.wav"),
        os.path.join(dir_name, "dali_test_4C.wav")
    ]
    # Per-channel sine frequencies (cycles per sample) for each file.
    freqs = [
        np.array([0.02]),
        np.array([0.01, 0.012]),
        np.array([0.01, 0.012, 0.013, 0.014])
    ]
    rates = [22050, 22050, 12347]
    lengths = [10000, 54321, 12345]

    # Synthesize the reference waveforms, quantize to 16-bit PCM, write wavs.
    for name, rate, length, freq in zip(names, rates, lengths, freqs):
        wave = generate_waveforms(length, freq)
        wave = (wave * 32767).round().astype(np.int16)
        scipy.io.wavfile.write(name, rate, wave)

    ref_text_literal = [
        "dali test 1C",
        "dali test 2C",
        "dali test 4C",
    ]
    nemo_asr_manifest = os.path.join(dir_name, "nemo_asr_manifest.json")
    create_manifest_file(nemo_asr_manifest, names, lengths, rates, ref_text_literal)
    return tmp_dir, nemo_asr_manifest
def setup_test_numpy_reader_cpu():
    """Create a temp dir with 20 random float32 .npy files of shape (5, 2, 8).

    Returns the TemporaryDirectory object; keep it alive while the files are
    in use (the directory removes itself when garbage-collected).

    Fixes vs the original: the `filenames` list was collected but never used,
    and `range(0, n)` is simply `range(n)`.
    """
    tmp_dir = tempfile.TemporaryDirectory()
    dir_name = tmp_dir.name
    rng = np.random.default_rng(12345)

    def create_numpy_file(filename, shape, typ, fortran_order):
        # Generate a random array of the requested dtype/layout and save it.
        arr = (rng.random(shape) * 10.).astype(typ)
        if fortran_order:
            arr = np.asfortranarray(arr)
        np.save(filename, arr)

    num_samples = 20
    for index in range(num_samples):
        filename = os.path.join(dir_name, "test_{:02d}.npy".format(index))
        create_numpy_file(filename, (5, 2, 8), np.float32, False)
    return tmp_dir
@pipeline_def
def pipeline_arithm_ops_cpu(source):
    """Build a CPU pipeline applying arithmetic, comparison, bitwise and
    `nvidia.dali.math` operations to externally-sourced HWC data, returning
    every result as one big output tuple."""
    data = fn.external_source(source=source, layout="HWC")
    processed = (data * 2,
                 data + 2,
                 data - 2,
                 data / 2,
                 data // 2,
                 data ** 2,
                 data == 2,
                 data != 2,
                 data < 2,
                 data <= 2,
                 data > 2,
                 data >= 2,
                 data & 2,
                 data | 2,
                 data ^ 2,
                 dmath.abs(data),
                 dmath.fabs(data),
                 dmath.floor(data),
                 dmath.ceil(data),
                 dmath.pow(data, 2),
                 dmath.fpow(data, 1.5),
                 dmath.min(data, 2),
                 dmath.max(data, 50),
                 dmath.clamp(data, 10, 50),
                 dmath.sqrt(data),
                 dmath.rsqrt(data),
                 dmath.cbrt(data),
                 dmath.exp(data),
                 # NOTE(review): dmath.exp appears twice in a row — duplicate
                 # entry, or was a different function (e.g. expm1) intended?
                 # Confirm against the consuming test.
                 dmath.exp(data),
                 dmath.log(data),
                 dmath.log2(data),
                 dmath.log10(data),
                 dmath.sin(data),
                 dmath.cos(data),
                 dmath.tan(data),
                 dmath.asin(data),
                 dmath.acos(data),
                 dmath.atan(data),
                 dmath.atan2(data, 3),
                 dmath.sinh(data),
                 dmath.cosh(data),
                 dmath.tanh(data),
                 dmath.asinh(data),
                 dmath.acosh(data),
                 dmath.atanh(data))
    return processed
|
DALI-main
|
dali/test/python/test_dali_cpu_only_utils.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import numpy as np
import os
from test_utils import compare_pipelines
from test_utils import RandomDataIterator
import cv2
import glob
test_data_root = os.environ['DALI_EXTRA_PATH']
multichannel_tiff_root = os.path.join(test_data_root, 'db', 'single',
'multichannel', 'tiff_multichannel')
multichannel_tiff_files = glob.glob(multichannel_tiff_root + "/*.tif*")
def crop_func_help(image, layout, crop_y=0.2, crop_x=0.3, crop_h=220, crop_w=224):
    """Reference crop: cut a crop_h x crop_w window anchored at the relative
    position (crop_y, crop_x) within the valid anchor range.

    Supports "HWC" (H, W, C) and "FHWC" (frames, H, W, C) layouts; any other
    layout raises ValueError.

    Fix vs the original: an unknown layout previously hit a NameError on
    H/W before ever reaching the trailing `assert False`; the layout is now
    validated up front with a clear error.
    """
    if layout == "FHWC":
        assert len(image.shape) == 4
        H, W = image.shape[1], image.shape[2]
    elif layout == "HWC":
        assert len(image.shape) == 3
        H, W = image.shape[0], image.shape[1]
    else:
        raise ValueError("Unsupported layout: {}".format(layout))
    assert H >= crop_h
    assert W >= crop_w
    # Window start, computed in float32 and rounded to nearest (the +0.5
    # before int() truncation) to match DALI's crop anchor arithmetic.
    start_y = int(np.float32(crop_y) * np.float32(H - crop_h) + np.float32(0.5))
    end_y = start_y + crop_h
    start_x = int(np.float32(crop_x) * np.float32(W - crop_w) + np.float32(0.5))
    end_x = start_x + crop_w
    if layout == "FHWC":
        return image[:, start_y:end_y, start_x:end_x, :]
    return image[start_y:end_y, start_x:end_x, :]
def crop_NHWC_func(image):
    """Crop a single HWC image using the default reference-crop parameters."""
    return crop_func_help(image, layout="HWC")
def resize_func_help(image, size_x=300, size_y=900):
    """Resize with OpenCV; note cv2.resize takes (width, height) order."""
    return cv2.resize(image, (size_x, size_y))
def resize_func(image):
    # Fixed-size variant used as the PythonFunction counterpart of ops.Resize.
    return resize_func_help(image)
def transpose_func(image):
    """Swap the H and W axes, keeping channels last."""
    return np.transpose(image, (1, 0, 2))
def normalize_func(image):
    """Scale pixel values into [0, 1] as float32."""
    scaled = np.float32(image) / np.float32(255.0)
    return scaled
def full_pipe_func(image):
    """Numpy reference for the full chain: resize -> crop -> transpose -> normalize."""
    for stage in (resize_func, crop_NHWC_func, transpose_func, normalize_func):
        image = stage(image)
    return image
class MultichannelSynthPipeline(Pipeline):
    """DALI pipeline that feeds synthetic multichannel data through one tested
    operator (resize / crop / transpose / normalize), or through all of them
    chained when `tested_operator` is falsy."""
    def __init__(self, device, batch_size, layout, iterator,
                 num_threads=1, device_id=0, tested_operator=None):
        super(MultichannelSynthPipeline, self).__init__(batch_size,
                                                        num_threads,
                                                        device_id)
        self.device = device
        self.layout = layout
        # External python iterator; consumed batch by batch in iter_setup().
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.tested_operator = tested_operator
        # Instantiate only the operator(s) under test; falsy means "all".
        if self.tested_operator == 'resize' or not self.tested_operator:
            self.resize = ops.Resize(device=self.device,
                                     resize_y=900,
                                     resize_x=300,
                                     min_filter=types.DALIInterpType.INTERP_LINEAR,
                                     antialias=False)
        if self.tested_operator == 'crop' or not self.tested_operator:
            self.crop = ops.Crop(device=self.device,
                                 crop=(220, 224),
                                 crop_pos_x=0.3,
                                 crop_pos_y=0.2)
        if self.tested_operator == 'transpose' or not self.tested_operator:
            self.transpose = ops.Transpose(device=self.device,
                                           perm=(1, 0, 2),
                                           transpose_layout=False)
        if self.tested_operator == 'normalize' or not self.tested_operator:
            self.cmn = ops.CropMirrorNormalize(device=self.device,
                                               std=255.,
                                               mean=0.,
                                               output_layout="HWC",
                                               dtype=types.FLOAT)
    def define_graph(self):
        self.data = self.inputs()
        # Move data to GPU when the tested backend is the GPU one.
        out = self.data.gpu() if self.device == 'gpu' else self.data
        # Apply only the operator(s) that were instantiated in __init__.
        if self.tested_operator == 'resize' or not self.tested_operator:
            out = self.resize(out)
        if self.tested_operator == 'crop' or not self.tested_operator:
            out = self.crop(out)
        if self.tested_operator == 'transpose' or not self.tested_operator:
            out = self.transpose(out)
        if self.tested_operator == 'normalize' or not self.tested_operator:
            out = self.cmn(out)
        return out
    def iter_setup(self):
        # Pull the next batch from the python iterator and feed the graph.
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
class MultichannelSynthPythonOpPipeline(Pipeline):
    """Reference pipeline: runs an arbitrary python `function` over externally
    sourced data via ops.PythonFunction (requires async/pipelined execution
    to be disabled)."""
    def __init__(self, function, batch_size, layout, iterator, num_threads=1, device_id=0):
        super(MultichannelSynthPythonOpPipeline, self).__init__(batch_size,
                                                                num_threads,
                                                                device_id,
                                                                exec_async=False,
                                                                exec_pipelined=False)
        self.layout = layout
        # External python iterator; consumed batch by batch in iter_setup().
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.oper = ops.PythonFunction(function=function, output_layouts=layout)
    def define_graph(self):
        self.data = self.inputs()
        out = self.oper(self.data)
        return out
    def iter_setup(self):
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
def get_numpy_func(tested_operator):
    """Map an operator name to its numpy/cv2 reference implementation.

    A falsy `tested_operator` (None/'') selects the full-chain reference.
    Raises ValueError for unknown names — previously this was an
    `assert False`, which is stripped under `python -O`.
    """
    if not tested_operator:
        return full_pipe_func
    reference_funcs = {
        'resize': resize_func,
        'crop': crop_NHWC_func,
        'transpose': transpose_func,
        'normalize': normalize_func,
    }
    try:
        return reference_funcs[tested_operator]
    except KeyError:
        raise ValueError("Unknown operator: {}".format(tested_operator))
def check_multichannel_synth_data_vs_numpy(tested_operator, device, batch_size, shape):
    """Compare one DALI operator (or the full chain) against its numpy/cv2
    reference on random multichannel data."""
    # Two iterators so each pipeline consumes its own copy of the data stream.
    eii1 = RandomDataIterator(batch_size, shape=shape)
    eii2 = RandomDataIterator(batch_size, shape=shape)
    mc_pipe = MultichannelSynthPipeline(device, batch_size, "HWC", iter(eii1),
                                        tested_operator=tested_operator)
    mc_pipe_python_op = MultichannelSynthPythonOpPipeline(get_numpy_func(tested_operator),
                                                          batch_size, "HWC", iter(eii2))
    # NOTE(review): eps=0.2 is loose — presumably to tolerate interpolation
    # differences between DALI and cv2; confirm before tightening.
    compare_pipelines(mc_pipe, mc_pipe_python_op, batch_size=batch_size, N_iterations=3, eps=0.2)
def test_multichannel_synth_data_vs_numpy():
    """Each DALI operator (and the whole chain) vs its numpy reference."""
    full_pipeline_case = None
    for op_name in ['resize', 'crop', 'transpose', 'normalize', full_pipeline_case]:
        # TODO(janton): remove when we implement CPU transpose
        if op_name in [None, 'transpose']:
            devices = ['gpu']
        else:
            devices = ['cpu', 'gpu']
        for dev in devices:
            for bs in {3}:
                for sample_shape in {(2048, 512, 8)}:
                    yield (check_multichannel_synth_data_vs_numpy,
                           op_name, dev, bs, sample_shape)
class MultichannelPipeline(Pipeline):
    """DALI pipeline decoding multichannel TIFFs and running the full
    resize -> crop -> transpose -> normalize chain on the chosen device."""
    def __init__(self, device, batch_size, num_threads=1, device_id=0):
        super(MultichannelPipeline, self).__init__(batch_size, num_threads, device_id)
        self.device = device
        self.reader = ops.readers.File(files=multichannel_tiff_files)
        # GPU runs use the hybrid ('mixed') decoder; CPU runs decode on CPU.
        decoder_device = 'mixed' if self.device == 'gpu' else 'cpu'
        # ANY_DATA keeps all channels (multichannel TIFFs are not RGB).
        self.decoder = ops.decoders.Image(device=decoder_device, output_type=types.ANY_DATA)
        self.resize = ops.Resize(device=self.device,
                                 resize_y=900, resize_x=300,
                                 min_filter=types.DALIInterpType.INTERP_LINEAR,
                                 antialias=False)
        self.crop = ops.Crop(device=self.device,
                             crop_h=220, crop_w=224,
                             crop_pos_x=0.3, crop_pos_y=0.2)
        self.transpose = ops.Transpose(device=self.device,
                                       perm=(1, 0, 2),
                                       transpose_layout=False)
        self.cmn = ops.CropMirrorNormalize(device=self.device,
                                           std=255., mean=0.,
                                           output_layout="HWC",
                                           dtype=types.FLOAT)
    def define_graph(self):
        encoded_data, _ = self.reader()
        decoded_data = self.decoder(encoded_data)
        # The mixed decoder already outputs on GPU; otherwise move explicitly.
        out = decoded_data.gpu() if self.device == 'gpu' else decoded_data
        out = self.resize(out)
        out = self.crop(out)
        out = self.transpose(out)
        out = self.cmn(out)
        return out
class MultichannelPythonOpPipeline(Pipeline):
    """Reference pipeline: decodes the same multichannel TIFFs on CPU and runs
    an arbitrary python `function` via ops.PythonFunction (async/pipelined
    execution must be disabled for python operators)."""
    def __init__(self, function, batch_size, num_threads=1, device_id=0):
        super(MultichannelPythonOpPipeline, self).__init__(batch_size,
                                                           num_threads,
                                                           device_id,
                                                           exec_async=False,
                                                           exec_pipelined=False)
        self.reader = ops.readers.File(files=multichannel_tiff_files)
        self.decoder = ops.decoders.Image(device='cpu', output_type=types.ANY_DATA)
        self.oper = ops.PythonFunction(function=function, output_layouts="HWC")
    def define_graph(self):
        encoded_data, _ = self.reader()
        decoded_data = self.decoder(encoded_data)
        out = self.oper(decoded_data)
        return out
def check_full_pipe_multichannel_vs_numpy(device, batch_size):
    """Full multichannel TIFF pipeline vs the numpy reference chain."""
    compare_pipelines(MultichannelPipeline(device, batch_size),
                      MultichannelPythonOpPipeline(full_pipe_func, batch_size),
                      batch_size=batch_size, N_iterations=3, eps=1e-03)
def test_full_pipe_multichannel_vs_numpy():
    """End-to-end multichannel pipeline vs numpy, on both devices and batch sizes."""
    cases = [(dev, bs) for dev in ('cpu', 'gpu') for bs in (1, 3)]
    for dev, bs in cases:
        yield check_full_pipe_multichannel_vs_numpy, dev, bs
|
DALI-main
|
dali/test/python/test_pipeline_multichannel.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from math import ceil, sqrt
import numpy as np
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.pipeline import Pipeline
from test_utils import get_dali_extra_path
from test_utils import to_array
test_data_root = get_dali_extra_path()
def coco_anchors():
    """Generate the flat list of SSD300 default boxes in relative ltrb coords.

    Standard SSD anchor generation for a 300x300 input over 6 feature maps:
    8732 boxes * 4 coordinates = 34928 floats.

    Fixes vs the original: the unused `anchor_idx` counter and `feat_count`
    local are removed, and the clamping of w/h (which is invariant per box
    size) is hoisted out of the per-cell loops.
    """
    anchors = []
    fig_size = 300
    feat_sizes = [38, 19, 10, 5, 3, 1]
    steps = [8., 16., 32., 64., 100., 300.]
    scales = [21., 45., 99., 153., 207., 261., 315.]
    aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
    # Effective feature-map sizes used to place the anchor centers.
    fks = [fig_size / step for step in steps]

    def clamp01(v):
        # Clip a relative coordinate/extent into [0, 1].
        return max(min(v, 1.), 0.)

    for idx, feat_size in enumerate(feat_sizes):
        sk1 = scales[idx] / fig_size
        sk2 = scales[idx + 1] / fig_size
        sk3 = sqrt(sk1 * sk2)
        # Base square box, intermediate-scale square box, then one (w, h) and
        # one (h, w) box per aspect ratio.
        all_sizes = [[sk1, sk1], [sk3, sk3]]
        for alpha in aspect_ratios[idx]:
            w = sk1 * sqrt(alpha)
            h = sk1 / sqrt(alpha)
            all_sizes.append([w, h])
            all_sizes.append([h, w])
        for w, h in all_sizes:
            # w/h do not depend on the cell, so clamp once per box size.
            w = clamp01(w)
            h = clamp01(h)
            for i in range(feat_size):
                for j in range(feat_size):
                    cx = clamp01((j + 0.5) / fks[idx])
                    cy = clamp01((i + 0.5) / fks[idx])
                    anchors.extend([cx - 0.5 * w, cy - 0.5 * h,
                                    cx + 0.5 * w, cy + 0.5 * h])
    return anchors
def horizontal_flip_ref(image, boxes):
    """Mirror the image left-right and flip ltrb boxes accordingly (boxes
    are modified in place and also returned)."""
    if boxes.ndim == 1:
        # Single box: mirror and swap the left/right x coordinates.
        boxes[0], boxes[2] = 1.0 - boxes[2], 1.0 - boxes[0]
    else:
        boxes[:, 0], boxes[:, 2] = 1.0 - boxes[:, 2], 1.0 - boxes[:, 0]
    return np.fliplr(image), boxes
def normalize_ref(image):
    """Reference normalization: HWC -> CHW, scale to [0, 1], then per-channel
    subtract the ImageNet mean and divide by the ImageNet std."""
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    chw = image.astype(dtype=np.float64).transpose((2, 0, 1)) / 255
    for plane_idx, (m, s) in zip(range(len(chw)), zip(mean, std)):
        chw[plane_idx] = (chw[plane_idx] - m) / s
    return chw
def resize_ref(image, size):
    """PIL-based reference resize; `size` is (width, height), bilinear filter."""
    from PIL import Image
    return np.array(Image.fromarray(image).resize(size, Image.BILINEAR))
class DetectionPipeline(Pipeline):
def __init__(self, args, device_id, file_root, annotations_file):
super(DetectionPipeline, self).__init__(
batch_size=args.batch_size,
num_threads=args.num_workers,
device_id=device_id,
prefetch_queue_depth=args.prefetch,
seed=args.seed)
# Reading COCO dataset
self.input = ops.readers.COCO(
file_root=file_root,
annotations_file=annotations_file,
shard_id=device_id,
num_shards=args.num_gpus,
ratio=True,
ltrb=True,
random_shuffle=True)
self.decode_cpu = ops.decoders.Image(device="cpu", output_type=types.RGB)
self.decode_crop = ops.decoders.ImageSlice(device="cpu", output_type=types.RGB)
self.decode_gpu = ops.decoders.Image(
device="mixed",
output_type=types.RGB,
hw_decoder_load=0)
self.decode_gpu_crop = ops.decoders.ImageSlice(
device="mixed",
output_type=types.RGB,
hw_decoder_load=0)
self.ssd_crop = ops.SSDRandomCrop(
device="cpu", num_attempts=1, seed=args.seed)
self.random_bbox_crop = ops.RandomBBoxCrop(
device="cpu",
aspect_ratio=[0.5, 2.0],
thresholds=[0, 0.1, 0.3, 0.5, 0.7, 0.9],
scaling=[0.3, 1.0],
bbox_layout="xyXY",
seed=args.seed)
self.slice_cpu = ops.Slice(device="cpu")
self.slice_gpu = ops.Slice(device="gpu")
self.resize_cpu = ops.Resize(
device="cpu",
resize_x=300,
resize_y=300,
min_filter=types.DALIInterpType.INTERP_TRIANGULAR)
self.resize_gpu = ops.Resize(
device="gpu",
resize_x=300,
resize_y=300,
min_filter=types.DALIInterpType.INTERP_TRIANGULAR)
mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
crop_size = (300, 300)
self.normalize_cpu = ops.CropMirrorNormalize(
device="cpu",
crop=crop_size,
mean=mean,
std=std,
mirror=0,
dtype=types.FLOAT)
self.normalize_gpu = ops.CropMirrorNormalize(
device="gpu",
crop=crop_size,
mean=mean,
std=std,
mirror=0,
dtype=types.FLOAT)
self.twist_cpu = ops.ColorTwist(device="cpu")
self.twist_gpu = ops.ColorTwist(device="gpu")
self.hsv_cpu = ops.Hsv(device="cpu", dtype=types.FLOAT)
self.hsv_gpu = ops.Hsv(device="gpu", dtype=types.FLOAT)
self.bc_cpu = ops.BrightnessContrast(device="cpu", dtype=types.UINT8, contrast_center=128)
self.bc_gpu = ops.BrightnessContrast(device="gpu", dtype=types.UINT8, contrast_center=128)
self.flip_cpu = ops.Flip(device="cpu")
self.bbox_flip_cpu = ops.BbFlip(device="cpu", ltrb=True)
self.flip_gpu = ops.Flip(device="gpu")
self.bbox_flip_gpu = ops.BbFlip(device="gpu", ltrb=True)
default_boxes = coco_anchors()
self.box_encoder_cpu = ops.BoxEncoder(
device="cpu",
criteria=0.5,
anchors=default_boxes)
self.box_encoder_gpu = ops.BoxEncoder(
device="gpu",
criteria=0.5,
anchors=default_boxes)
self.box_encoder_cpu_offsets = ops.BoxEncoder(
device="cpu",
criteria=0.5,
offset=True,
scale=2,
stds=[0.1, 0.1, 0.2, 0.2],
anchors=default_boxes)
self.box_encoder_gpu_offsets = ops.BoxEncoder(
device="gpu",
criteria=0.5,
offset=True,
scale=2,
stds=[0.1, 0.1, 0.2, 0.2],
anchors=default_boxes)
# Random variables
self.saturation_rng = ops.random.Uniform(range=[0.8, 1.2])
self.contrast_rng = ops.random.Uniform(range=[0.5, 1.5])
self.brighness_rng = ops.random.Uniform(range=[0.875, 1.125])
self.hue_rng = ops.random.Uniform(range=[-45, 45])
def define_graph(self):
    """Build a graph that runs the same logical transforms through multiple
    operator/back-end combinations (CPU vs GPU, fused vs composed ops) so
    that run_for_dataset can compare the outputs pairwise."""
    # Random variables - drawn once and shared by the CPU and GPU colour
    # ops so both paths see identical parameters.
    saturation = self.saturation_rng()
    contrast = self.contrast_rng()
    brightness = self.brighness_rng()
    hue = self.hue_rng()
    inputs, boxes, labels = self.input(name="Reader")
    image = self.decode_cpu(inputs)
    image_ssd_crop, boxes_ssd_crop, labels_ssd_crop = self.ssd_crop(
        image, boxes, labels)
    # Same crop expressed as an explicit window so it can be applied via
    # decode_crop / slice and compared against the fused ssd_crop result.
    crop_begin, crop_size, boxes_random_crop, labels_random_crop = \
        self.random_bbox_crop(boxes, labels)
    image_decode_crop = self.decode_crop(inputs, crop_begin, crop_size)
    image_slice_cpu = self.slice_cpu(image, crop_begin, crop_size)
    image_slice_gpu = self.slice_gpu(image.gpu(), crop_begin, crop_size)
    image_resized_cpu = self.resize_cpu(image_ssd_crop)
    image_resized_gpu = self.resize_gpu(image_ssd_crop.gpu())
    image_normalized_cpu = self.normalize_cpu(image_resized_cpu)
    image_normalized_gpu = self.normalize_gpu(image_resized_cpu.gpu())
    # Colour twist composed from HSV + BrightnessContrast...
    image_twisted_cpu = self.hsv_cpu(image_ssd_crop, saturation=saturation, hue=hue)
    image_twisted_cpu = self.bc_cpu(image_twisted_cpu, brightness=brightness, contrast=contrast)
    image_twisted_gpu = self.hsv_gpu(image_ssd_crop.gpu(), saturation=saturation, hue=hue)
    image_twisted_gpu = self.bc_gpu(image_twisted_gpu, brightness=brightness, contrast=contrast)
    # ...and via the single legacy ColorTwist operator, for cross-checking.
    image_legacy_twisted_cpu = self.twist_cpu(
        image_ssd_crop,
        saturation=saturation,
        contrast=contrast,
        brightness=brightness,
        hue=hue)
    image_legacy_twisted_gpu = self.twist_gpu(
        image_ssd_crop.gpu(),
        saturation=saturation,
        contrast=contrast,
        brightness=brightness,
        hue=hue)
    image_flipped_cpu = self.flip_cpu(image_resized_cpu)
    boxes_flipped_cpu = self.bbox_flip_cpu(boxes_ssd_crop)
    image_flipped_gpu = self.flip_gpu(image_resized_cpu.gpu())
    boxes_flipped_gpu = self.bbox_flip_gpu(boxes_ssd_crop.gpu())
    encoded_boxes_cpu, encoded_labels_cpu = self.box_encoder_cpu(
        boxes_ssd_crop, labels_ssd_crop)
    encoded_boxes_gpu, encoded_labels_gpu = self.box_encoder_gpu(
        boxes_ssd_crop.gpu(), labels_ssd_crop.gpu())
    encoded_offset_boxes_cpu, encoded_offset_labels_cpu = self.box_encoder_cpu_offsets(
        boxes_ssd_crop, labels_ssd_crop)
    encoded_offset_boxes_gpu, encoded_offset_labels_gpu = self.box_encoder_gpu_offsets(
        boxes_ssd_crop.gpu(), labels_ssd_crop.gpu())
    # GPU-side decode path, sliced with the same window as the CPU path.
    image_gpu = self.decode_gpu(inputs)
    image_gpu_slice_gpu = self.slice_gpu(image_gpu, crop_begin, crop_size)
    image_decode_crop_gpu = self.decode_gpu_crop(inputs, crop_begin, crop_size)
    return (
        labels,
        image_ssd_crop, image_decode_crop,
        image_slice_cpu, image_slice_gpu,
        boxes_ssd_crop, boxes_random_crop,
        labels_ssd_crop, labels_random_crop,
        image_resized_cpu, image_resized_gpu,
        image_normalized_cpu, image_normalized_gpu,
        image_twisted_cpu, image_twisted_gpu,
        image_legacy_twisted_cpu, image_legacy_twisted_gpu,
        image_flipped_cpu, image_flipped_gpu,
        boxes_flipped_cpu, boxes_flipped_gpu,
        encoded_boxes_cpu, encoded_boxes_gpu,
        encoded_labels_cpu, encoded_labels_gpu,
        encoded_offset_boxes_cpu, encoded_offset_boxes_gpu,
        encoded_offset_labels_cpu, encoded_offset_labels_gpu,
        image_decode_crop_gpu, image_gpu_slice_gpu
    )
def data_paths(use_full_coco):
    """Return the list of (image_dir, annotations_json) pairs to test on.

    With `use_full_coco` set, both the train2017 and val2017 splits of a
    locally mounted full COCO-2017 checkout are returned; otherwise the
    small COCO sample shipped with DALI_EXTRA is used.
    """
    if use_full_coco:
        root = '/data/coco/coco-2017/coco2017/'
        return [
            (os.path.join(root, split),
             os.path.join(root, 'annotations/instances_%s.json' % split))
            for split in ('train2017', 'val2017')
        ]
    images = os.path.join(test_data_root, 'db', 'coco', 'images')
    annotations = os.path.join(test_data_root, 'db', 'coco', 'instances.json')
    return [(images, annotations)]
def set_iters(args, dataset_size):
    """Default args.iters to one full epoch when the user did not set it."""
    if args.iters is not None:
        return
    samples_per_iter = args.batch_size * args.num_gpus
    args.iters = int(ceil(dataset_size / samples_per_iter))
def compare(val_1, val_2, reference=None):
    """True iff val_1 and val_2 are element-wise close (and, when given,
    both are close to `reference`)."""
    ok = np.allclose(val_1, val_2)
    if reference is None:
        return ok
    return ok and np.allclose(val_1, reference) and np.allclose(val_2, reference)
def crop_border(image, border):
    """Strip `border` pixels from each side of an HWC `image`.

    Fixes the `border == 0` edge case: `image[0:-0]` is an empty slice, so
    the original returned an empty array instead of the whole image.
    (Current callers guard against border == 0, so they are unaffected.)
    """
    if border == 0:
        return image
    return image[border:-border, border:-border, :]
def diff_against_eps(image_1, image_2, eps):
    """True iff the maximum absolute difference does not exceed `eps`."""
    delta = np.absolute(image_1.astype(float) - image_2.astype(float))
    return delta.max() <= eps
def relaxed_compare(val_1, val_2, reference=None, eps=1, border=0):
    """Like `compare`, but tolerates per-pixel differences up to `eps` and
    can optionally crop `border` pixels off before comparing against
    `reference` (useful for resampling ops whose edges differ)."""
    ok = diff_against_eps(val_1, val_2, eps)
    if reference is None:
        return ok
    if border != 0:
        reference = crop_border(reference, border)
        return (ok
                and diff_against_eps(reference, crop_border(val_1, border), eps)
                and diff_against_eps(reference, crop_border(val_2, border), eps))
    return (ok
            and diff_against_eps(reference, val_1, eps)
            and diff_against_eps(reference, val_2, eps))
def run_for_dataset(args, dataset):
    """Run the DetectionPipeline over `dataset` and cross-check every pair
    of equivalent outputs; prints diagnostics and exits(1) on mismatch."""
    print("Build pipeline")
    # One pipeline per GPU, each reading its own shard.
    pipes = [DetectionPipeline(args, device_id, dataset[0], dataset[1])
             for device_id in range(args.num_gpus)]
    [pipe.build() for pipe in pipes]
    set_iters(args, pipes[0].epoch_size('Reader'))
    for iter in range(args.iters):
        for pipe in pipes:
            # Unpack in the exact order produced by define_graph.
            labels, \
                image_ssd_crop, image_decode_crop, \
                image_slice_cpu, image_slice_gpu, \
                boxes_ssd_crop, boxes_random_crop, \
                labels_ssd_crop, labels_random_crop, \
                image_resized_cpu, image_resized_gpu, \
                image_normalized_cpu, image_normalized_gpu, \
                image_twisted_cpu, image_twisted_gpu, \
                image_legacy_twisted_cpu, image_legacy_twisted_gpu, \
                image_flipped_cpu, image_flipped_gpu, \
                boxes_flipped_cpu, boxes_flipped_gpu, \
                encoded_boxes_cpu, encoded_boxes_gpu, \
                encoded_labels_cpu, encoded_labels_gpu, \
                encoded_offset_boxes_cpu, encoded_offset_boxes_gpu, \
                encoded_offset_labels_cpu, encoded_offset_labels_gpu, \
                image_decode_crop_gpu, image_gpu_slice_gpu = \
                [np.squeeze(to_array(out)) for out in pipe.run()]
            # Check reader: all labels must fall in the 80 COCO categories.
            labels = ((labels > 0) & (labels <= 80)).all()
            # Check cropping ops: all crop variants must agree exactly.
            decode_crop = compare(image_ssd_crop, image_decode_crop)
            slice_cpu = compare(image_ssd_crop, image_slice_cpu)
            slice_gpu = compare(image_ssd_crop, image_slice_gpu)
            decode_crop_gpu = compare(image_gpu_slice_gpu, image_decode_crop_gpu)
            image_crop = decode_crop and slice_cpu and slice_gpu and decode_crop_gpu
            boxes_crop = compare(boxes_ssd_crop, boxes_random_crop)
            labels_crop = compare(labels_ssd_crop, labels_random_crop)
            crop = image_crop and boxes_crop and labels_crop
            # HSV+BrightnessContrast vs legacy ColorTwist (looser tolerance).
            hsv_bc_twist = relaxed_compare(image_twisted_gpu, image_legacy_twisted_gpu, eps=4)
            # Check resizing ops (1px border cropped: edge pixels differ).
            resize = relaxed_compare(
                val_1=image_resized_cpu,
                val_2=image_resized_gpu,
                reference=resize_ref(image_ssd_crop, (300, 300)),
                border=1)
            # Check normalizing ops
            image_normalized_ref = normalize_ref(image_resized_cpu)
            normalize = compare(
                image_normalized_cpu, image_normalized_gpu, image_normalized_ref)
            # Check twisting ops
            twist_gpu_cpu = relaxed_compare(image_twisted_cpu, image_twisted_gpu, eps=2)
            twist = twist_gpu_cpu and hsv_bc_twist
            # Check flipping ops against a host-side reference.
            image_flipped_ref, boxes_flipped_ref = horizontal_flip_ref(
                image_resized_cpu, boxes_ssd_crop)
            image_flip = compare(
                image_flipped_cpu, image_flipped_gpu, image_flipped_ref)
            boxes_flip = compare(
                boxes_flipped_cpu, boxes_flipped_gpu, boxes_flipped_ref)
            flip = image_flip and boxes_flip
            # Check box encoding ops (plain and offset/scaled variants).
            encoded_boxes = compare(encoded_boxes_cpu, encoded_boxes_gpu)
            encoded_labels = compare(encoded_labels_cpu, encoded_labels_gpu)
            encoded_boxes_offset = compare(encoded_offset_boxes_cpu, encoded_offset_boxes_gpu)
            encoded_labels_offset = compare(encoded_offset_labels_cpu, encoded_offset_labels_gpu)
            # Offset encoding must not change the label assignment.
            encoded_labels_cpu = compare(encoded_labels_cpu, encoded_offset_labels_cpu)
            encoded_labels_gpu = compare(encoded_labels_gpu, encoded_offset_labels_gpu)
            box_encoder = encoded_boxes and encoded_boxes_offset and encoded_labels \
                and encoded_labels_offset and encoded_labels_cpu and encoded_labels_gpu
            # Any failure: dump which group of checks failed and abort.
            if not labels or not crop or not resize or not normalize or not twist \
                    or not flip or not box_encoder:
                print('Error during iteration', iter)
                print('Labels = ', labels)
                print('Crop = ', crop)
                print('    decode_crop =', decode_crop)
                print('    decode_crop_gpu =', decode_crop_gpu)
                print('    slice_cpu =', slice_cpu)
                print('    slice_gpu =', slice_gpu)
                print('    boxes_crop =', boxes_crop)
                print('    labels_crop =', labels_crop)
                print('Resize =', resize)
                print('Normalize =', normalize)
                print('Twist =', twist)
                print('    twist gpu vs cpu = ', twist_gpu_cpu)
                print('    HSV + BC vs legacy Twist = ', hsv_bc_twist)
                print('Flip =', flip)
                print('    image_flip =', image_flip)
                print('    boxes_flip =', boxes_flip)
                print('Box encoder =', box_encoder)
                print('    encoded_boxes =', encoded_boxes)
                print('    encoded_boxes_offset =', encoded_boxes_offset)
                print('    encoded_labels =', encoded_labels)
                print('    encoded_labels_offset =', encoded_labels_offset)
                print('    encoded_labels_cpu =', encoded_labels_cpu)
                print('    encoded_labels_gpu =', encoded_labels_gpu)
                exit(1)
        if not iter % 100:
            print("Iteration: {}/ {}".format(iter + 1, args.iters))
    print("OK")
def print_args(args):
    """Print every attribute of the parsed arguments, one per line."""
    print('Args values:')
    for name, value in vars(args).items():
        print('{0} = {1}'.format(name, value))
    print()
def run_test(args):
    """Run the detection pipeline comparison on every configured data set."""
    print_args(args)
    for dataset in data_paths(args.use_full_coco):
        print('Run DetectionPipeline test for', dataset[0])
        run_for_dataset(args, dataset)
def make_parser():
    """Build the command-line parser for the detection pipeline test."""
    parser = argparse.ArgumentParser(description='Detection pipeline test')
    # (short, long, default, help) for all plain integer options.
    int_options = [
        ('-i', '--iters', None,
         'number of iterations to run (default: whole dataset)'),
        ('-g', '--num_gpus', 1,
         'number of GPUs (default: %(default)s)'),
        ('-s', '--seed', 0,
         'seed for random ops (default: random seed)'),
        ('-w', '--num_workers', 3,
         'number of worker threads (default: %(default)s)'),
        ('-p', '--prefetch', 2,
         'prefetch queue depth (default: %(default)s)'),
    ]
    for short_flag, long_flag, default, help_text in int_options:
        parser.add_argument(short_flag, long_flag, default=default, type=int,
                            metavar='N', help=help_text)
    parser.add_argument(
        '--use_full_coco', action='store_true',
        help='Use full COCO data set for this test')
    return parser
if __name__ == "__main__":
    parser = make_parser()
    args = parser.parse_args()
    # Force single-sample batches: the per-op output comparison in
    # run_for_dataset squeezes each output to a single sample.
    args.batch_size = 1
    run_test(args)
|
DALI-main
|
dali/test/python/test_detection_pipeline.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.types as types
import numpy as np
from nose_utils import assert_raises
import sys
import inspect
from nose.plugins.attrib import attr
import nose
def _test_fn_rotate(device):
    """Rotate a tiny HWC image by 90 degrees on `device` and compare the
    result against a hand-computed reference."""
    pipe = Pipeline(batch_size=1, num_threads=1, device_id=0)
    image = np.array([
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12]], dtype=np.uint8)[:, :, np.newaxis]
    batch = [image]
    input = fn.external_source([batch], layout="HWC")
    rotated = fn.rotate(input.gpu() if device == "gpu" else input,
                        angle=90)
    pipe.set_outputs(rotated)
    pipe.build()
    outs = pipe.run()
    # GPU outputs must be copied back to the host before comparison.
    out = outs[0] if device == "cpu" else outs[0].as_cpu()
    arr = out.at(0)
    ref = np.array([
        [4, 8, 12],
        [3, 7, 11],
        [2, 6, 10],
        [1, 5, 9]])[:, :, np.newaxis]
    assert np.array_equal(arr, ref)
def test_set_outputs():
    """A nested `DataNode` inside the pipeline outputs must raise TypeError
    at build time.

    NOTE(review): this is byte-identical to test_set_outputs_err_msg_unpack
    below - one of the two is likely redundant.
    """
    data = [[[np.random.rand(1, 3, 2)], [np.random.rand(1, 4, 5)]]]
    pipe = Pipeline(batch_size=1, num_threads=1, device_id=None)
    pipe.set_outputs(fn.external_source(data, num_outputs=2, cycle='quiet'))
    with assert_raises(TypeError,
                       glob='Illegal pipeline output type. '
                            'The output * contains a nested `DataNode`'):
        pipe.build()
def test_set_outputs_err_msg_unpack():
    """Verify the nested-DataNode error message for un-unpacked outputs.

    NOTE(review): byte-identical to test_set_outputs above - one of the two
    is likely redundant.
    """
    data = [[[np.random.rand(1, 3, 2)], [np.random.rand(1, 4, 5)]]]
    pipe = Pipeline(batch_size=1, num_threads=1, device_id=None)
    pipe.set_outputs(fn.external_source(data, num_outputs=2, cycle='quiet'))
    with assert_raises(TypeError,
                       glob='Illegal pipeline output type. '
                            'The output * contains a nested `DataNode`'):
        pipe.build()
def test_set_outputs_err_msg_random_type():
    """A non-DataNode pipeline output (here a str) must raise TypeError."""
    pipe = Pipeline(batch_size=1, num_threads=1, device_id=None)
    pipe.set_outputs("test")
    with assert_raises(TypeError,
                       glob='Illegal output type. '
                            'The output * is a `<class \'str\'>`.'):
        pipe.build()
def test_fn_rotate():
    """Yield the rotate check once per back-end."""
    yield from ((_test_fn_rotate, device) for device in ("cpu", "gpu"))
def test_fn_python_function():
    """fn.python_function applied to external_source data increments each
    sample; checked over two consecutive runs."""
    pipe = Pipeline(1, 1, 0, exec_pipelined=False, exec_async=False)
    batch1 = [np.array([1, 2, 3])]
    batch2 = [np.array([2, 3, 4])]
    # we need a context, because we use an operator with potential side-effects (python_function)
    with pipe:
        src = fn.external_source([batch1, batch2])
        out = fn.python_function(src, function=lambda x: x + 1)
        pipe.set_outputs(out)
    pipe.build()
    assert np.array_equal(pipe.run()[0].at(0), batch1[0] + 1)
    assert np.array_equal(pipe.run()[0].at(0), batch2[0] + 1)
def test_fn_multiple_input_sets():
    """Passing two input sets through fn.rotate must yield two independently
    rotated outputs (multiple-input-set fan-out)."""
    pipe = Pipeline(batch_size=1, num_threads=1, device_id=0)
    image1 = np.array([
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12]], dtype=np.uint8)[:, :, np.newaxis]
    image2 = np.array([
        [10, 20],
        [30, 40],
        [50, 60]], dtype=np.uint8)[:, :, np.newaxis]
    batches = [[image1], [image2]]
    # num_outputs=2 makes `inputs` a list of two DataNodes.
    inputs = fn.external_source(lambda: batches, 2, layout="HWC")
    rotated = fn.rotate(inputs, angle=90)
    pipe.set_outputs(*rotated)
    pipe.build()
    outs = pipe.run()
    arr1 = outs[0].at(0)
    arr2 = outs[1].at(0)
    ref1 = np.array([
        [4, 8, 12],
        [3, 7, 11],
        [2, 6, 10],
        [1, 5, 9]])[:, :, np.newaxis]
    ref2 = np.array([
        [20, 40, 60],
        [10, 30, 50]], dtype=np.uint8)[:, :, np.newaxis]
    assert np.array_equal(arr1, ref1)
    assert np.array_equal(arr2, ref2)
def test_scalar_constant():
    """types.ScalarConstant works both as an operator argument and as a
    standalone pipeline output."""
    pipe = Pipeline(batch_size=1, num_threads=1, device_id=0)
    image1 = np.array([
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12]], dtype=np.uint8)[:, :, np.newaxis]
    image2 = np.array([
        [10, 20],
        [30, 40],
        [50, 60]], dtype=np.uint8)[:, :, np.newaxis]
    batches = [[image1], [image2]]
    inputs = fn.external_source(lambda: batches, 2, layout="HWC")
    rotated = fn.rotate(inputs, angle=types.ScalarConstant(90))
    # The ScalarConstant itself is also exposed as a third output.
    pipe.set_outputs(*rotated, types.ScalarConstant(90))
    pipe.build()
    outs = pipe.run()
    arr1 = outs[0].at(0)
    arr2 = outs[1].at(0)
    arr3 = outs[2].at(0)
    ref1 = np.array([
        [4, 8, 12],
        [3, 7, 11],
        [2, 6, 10],
        [1, 5, 9]])[:, :, np.newaxis]
    ref2 = np.array([
        [20, 40, 60],
        [10, 30, 50]], dtype=np.uint8)[:, :, np.newaxis]
    ref3 = np.array(90)
    assert np.array_equal(arr1, ref1)
    assert np.array_equal(arr2, ref2)
    assert np.array_equal(arr3, ref3)
def test_to_snake_case_impl():
    """Exercise fn._to_snake_case on CamelCase, acronym, digit and
    underscore corner cases; each pair is (input, expected)."""
    fn_name_tests = [
        ('Test', 'test'),
        ('OneTwo', 'one_two'),
        ('TestXYZ', 'test_xyz'),
        ('testX', 'test_x'),
        ('TestXx', 'test_xx'),
        ('testXX', 'test_xx'),
        ('OneXYZTwo', 'one_xyz_two'),
        ('MFCC', 'mfcc'),
        ('RandomBBoxCrop', 'random_bbox_crop'),
        ('STFT_CPU', 'stft_cpu'),
        ('DOUBLE__UNDERSCORE', 'double__underscore'),
        ('double__underscore', 'double__underscore'),
        ('XYZ1ABC', 'xyz1abc'),
        ('XYZ1abc', 'xyz1abc'),
        ('trailing__', 'trailing__'),
        ('TRAILING__', 'trailing__'),
        ('Caffe2Reader', 'caffe2_reader'),
        ('COCOReader', 'coco_reader'),
        ('DLTensorPythonFunction', 'dl_tensor_python_function'),
        ('TFRecordReader', 'tfrecord_reader'),
        ('_Leading', '_leading'),
        ('_LEADing', '_lea_ding'),
        ('_LeAdIng', '_le_ad_ing'),
        ('_L_Eading', '_l_eading'),
    ]
    for inp, out in fn_name_tests:
        assert fn._to_snake_case(inp) == out, f"{fn._to_snake_case(inp)} != {out}"
def _test_schema_name_for_module(module_name, base_name=""):
""" Sanity test if we didn't miss the _schema_name for any op with custom wrapper """
if module_name.endswith("hidden"):
return
if base_name == "":
base_name = module_name
dali_module = sys.modules[module_name]
for member_name in dir(dali_module):
if member_name.startswith("_"):
continue
member = getattr(dali_module, member_name)
if inspect.isfunction(member):
# Check if we can reconstruct the name of the op from provided schema
assert hasattr(member, "_schema_name")
full_name = ops._op_name(member._schema_name)
nose.tools.eq_(base_name + "." + full_name, module_name + "." + member_name)
elif inspect.ismodule(member) and (module_name + "." + member_name) in sys.modules.keys():
# Recurse on DALI submodule (filter out non-DALI reexported modules like `sys`)
_test_schema_name_for_module(module_name + "." + member_name, base_name)
def test_schema_name():
    """Schema-name round-trip check for the core fn namespace."""
    _test_schema_name_for_module('nvidia.dali.fn')


@attr('pytorch')
def test_schema_name_torch():
    """Same check for the pytorch plugin's fn namespace."""
    import nvidia.dali.plugin.pytorch  # noqa: F401
    _test_schema_name_for_module('nvidia.dali.plugin.pytorch.fn')


@attr('numba')
def test_schema_name_numba():
    """Same check for the numba plugin's experimental fn namespace."""
    import nvidia.dali.plugin.numba  # noqa: F401
    _test_schema_name_for_module('nvidia.dali.plugin.numba.fn.experimental')
|
DALI-main
|
dali/test/python/test_functional_api.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali as dali
from nose.tools import with_setup
from test_pool_utils import capture_processes, teardown_function, setup_function
from test_utils import compare_pipelines, check_batch, RandomDataIterator, \
RandomlyShapedDataIterator
class ExtCallback:
    """Callable to generate specified data samples"""

    def __init__(self, dims, epoch_size, dtype, exception_class=StopIteration, random_data=False,
                 random_shape=False):
        # dims: sample shape (or maximum shape when random_shape is set).
        self.dims = dims
        self.epoch_size = epoch_size
        self.dtype = dtype
        # Exception raised once idx_in_epoch reaches epoch_size.
        self.exception_class = exception_class
        # Cache of deterministic samples, keyed by idx_in_epoch.
        self.ds = {}
        self.random_data = random_data
        self.random_shape = random_shape
        self.data_iterator = None
        # Samples already drawn from data_iterator (index == idx_in_epoch),
        # kept so repeated calls for the same index return identical data.
        self.iterator_data_samples = []
        if random_data and not random_shape:
            self.data_iterator = iter(RandomDataIterator(1, shape=dims, dtype=dtype))
        if random_data and random_shape:
            self.data_iterator = iter(RandomlyShapedDataIterator(1, max_shape=dims, dtype=dtype))
        if not random_data and random_shape:
            raise ValueError("If random_shape is required the random_data is required to be True.")

    def __call__(self, sample_info):
        # Signal end of epoch with the configured exception type.
        if sample_info.idx_in_epoch >= self.epoch_size:
            raise self.exception_class
        if self.data_iterator:
            # Grow the cache up to the requested index.
            while len(self.iterator_data_samples) <= sample_info.idx_in_epoch:
                # NOTE(review): relies on the iterator exposing a `.next()`
                # method (presumably the test_utils iterators do); builtin
                # `next(self.data_iterator)` would be the generic spelling -
                # confirm.
                batch = self.data_iterator.next()
                self.iterator_data_samples.append(batch[0])
            return self.iterator_data_samples[sample_info.idx_in_epoch]
        # Deterministic path: sample filled with its own epoch index.
        if sample_info.idx_in_epoch not in self.ds:
            self.ds[sample_info.idx_in_epoch] = np.full(
                self.dims, sample_info.idx_in_epoch, dtype=self.dtype)
        return self.ds[sample_info.idx_in_epoch]
class ExtCallbackTensorCPU(ExtCallback):
    """ExtCallback variant that wraps each sample in a DALI TensorCPU."""

    def __call__(self, sample_info):
        return dali.tensors.TensorCPU(super().__call__(sample_info))
def create_pipe(callback, device, batch_size, num_outputs=None, layout=None, py_num_workers=None,
                py_start_method="fork", parallel=True, device_id=0, batch=False, num_threads=1,
                cycle=None, batch_info=None, prefetch_queue_depth=2, reader_queue_depth=None):
    """Build a pipeline whose only source is an external_source configured
    by the given arguments; with num_outputs set, all outputs are exposed."""
    pipe = dali.pipeline.Pipeline(batch_size, num_threads, device_id, py_num_workers=py_num_workers,
                                  py_start_method=py_start_method,
                                  prefetch_queue_depth=prefetch_queue_depth)
    with pipe:
        inputs = dali.fn.external_source(callback, num_outputs=num_outputs, device=device,
                                         layout=layout, batch=batch, parallel=parallel, cycle=cycle,
                                         batch_info=batch_info,
                                         prefetch_queue_depth=reader_queue_depth)
        # With num_outputs, external_source returns a list that must be
        # unpacked into separate pipeline outputs.
        if num_outputs is None:
            pipe.set_outputs(inputs)
        else:
            pipe.set_outputs(*inputs)
    return pipe
def build_and_run_pipeline(pipe, iters=None, *args):
    """Build `pipe` and run it `iters` times; with iters=None, run until the
    source raises (e.g. StopIteration).

    NOTE(review): the trailing *args is unused inside the function -
    presumably kept for caller compatibility; confirm before removing.
    """
    pipe.build()
    # Register worker processes so teardown can detect leaks/hangs.
    capture_processes(pipe._py_pool)
    if iters is None:
        while True:
            pipe.run()
    else:
        for _ in range(iters):
            pipe.run()
# dtype is ignored but passed so that it is shown by nosetest
def check_callback(parallel_pipe, pipe, epoch_size, batch_size, dtype=None):
    """Compare a parallel external_source pipeline against a serial
    reference over one full epoch."""
    iters_no = epoch_size // batch_size
    parallel_pipe.build()
    pipe.build()
    capture_processes(parallel_pipe._py_pool)
    compare_pipelines(parallel_pipe, pipe, batch_size, iters_no)
@with_setup(setup_function, teardown_function)
def _check_spawn_with_callback(callback, callback_ref, batch_size, num_outputs, layout, workers_num,
                               epoch_size, dtype):
    """Build spawn-based parallel and serial pipelines from equivalent
    callbacks and assert they produce identical batches."""
    pipe_parallel = create_pipe(callback, 'cpu', batch_size, py_num_workers=workers_num,
                                py_start_method='spawn', parallel=True, num_outputs=num_outputs,
                                layout=layout)
    pipe = create_pipe(callback_ref, 'cpu', batch_size, parallel=False, num_outputs=num_outputs,
                       layout=layout)
    check_callback(pipe_parallel, pipe, epoch_size, batch_size, dtype)
def check_spawn_with_callback(callback_class, callback_ref_class=ExtCallback, num_outputs=None,
                              layout=None, dtypes=(np.float32, np.int32, np.uint8),
                              shapes=((4, 5),), random_data=False, random_shape=False):
    """Yield parallel-vs-serial test cases over all dtype/shape/worker/batch
    combinations.

    `dtypes` and `shapes` used to be mutable list defaults (a classic Python
    pitfall); they are only iterated here, so immutable tuple defaults are a
    drop-in replacement.
    """
    epoch_size = 250
    for shape in shapes:
        for dtype in dtypes:
            callback = callback_class(shape, epoch_size, dtype, random_data=random_data,
                                      random_shape=random_shape)
            callback_ref = callback_ref_class(shape, epoch_size, dtype, random_data=random_data,
                                              random_shape=random_shape)
            for workers_num in [1, 4]:
                for batch_size in [1, 16, 150]:
                    yield _check_spawn_with_callback, callback, callback_ref, batch_size, \
                        num_outputs, layout, workers_num, epoch_size, dtype
class ExtCallbackMultipleOutputs(ExtCallback):
    """ExtCallback variant returning two outputs: the sample and its
    index within the batch."""

    def __call__(self, sample_info):
        a = super().__call__(sample_info)
        return a, np.array([sample_info.idx_in_batch])
class CustomException(Exception):
    """Distinct exception type for tests exercising error propagation."""
    pass
def check_stop_iteration_resume(pipe, batch_size, layout):
    """Run `pipe` for two full epochs (separated by reset()) and assert both
    epochs produce the same number of matching batches with the expected
    layout."""
    pipe.build()
    capture_processes(pipe._py_pool)
    outputs_epoch_1, outputs_epoch_2 = [], []
    for output in [outputs_epoch_1, outputs_epoch_2]:
        try:
            while True:
                (r,) = pipe.run()
                # Copy out of DALI-owned buffers before the next run().
                r = [np.copy(r.at(i)) for i in range(len(r))]
                output.append(r)
        except StopIteration:
            pipe.reset()
    assert len(outputs_epoch_1) == len(outputs_epoch_2), (
        "Epochs must have same number of iterations, "
        "but they have {} {} respectively".format(len(outputs_epoch_1), len(outputs_epoch_2)))
    for out_1, out_2 in zip(outputs_epoch_1, outputs_epoch_2):
        check_batch(out_1, out_2, batch_size, 0, None, expected_layout=layout, compare_layouts=True)
def check_layout(pipe, layout):
    """Run `pipe` until StopIteration, asserting every output batch carries
    the expected layout string."""
    pipe.build()
    capture_processes(pipe._py_pool)
    while True:
        try:
            (res,) = pipe.run()
            assert res.layout() == layout
        except StopIteration:
            break
|
DALI-main
|
dali/test/python/test_external_source_parallel_utils.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cupy
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import numpy as np
import test_utils
import random
import os
def random_seed():
    """Draw a pseudo-random seed in [0, 2**32)."""
    span = 1 << 32
    return int(random.random() * span)
# Root of the DALI_EXTRA test-data checkout (the env var must be set).
test_data_root = os.environ['DALI_EXTRA_PATH']
images_dir = os.path.join(test_data_root, 'db', 'single', 'jpeg')
DEVICE_ID = 0
BATCH_SIZE = 8
ITERS = 32
# One seed per test session, shared by all pipelines for reproducibility.
SEED = random_seed()
NUM_WORKERS = 6
class PythonFunctionPipeline(Pipeline):
    """Pipeline that decodes and normalizes images, then applies a
    user-supplied function via PythonFunction on the chosen device."""

    def __init__(self, function, device, num_outputs=1):
        super(PythonFunctionPipeline, self).__init__(BATCH_SIZE, NUM_WORKERS, DEVICE_ID,
                                                     seed=SEED)
        self.device = device
        self.reader = ops.readers.File(file_root=images_dir)
        self.decode = ops.decoders.Image(device='cpu',
                                         output_type=types.RGB)
        self.norm = ops.CropMirrorNormalize(std=255., mean=0., device=device, output_layout="HWC")
        self.func = ops.PythonFunction(device=device, function=function, num_outputs=num_outputs)

    def define_graph(self):
        jpegs, labels = self.reader()
        decoded = self.decode(jpegs)
        # Transfer to GPU only for the GPU variant of the pipeline.
        images = decoded if self.device == 'cpu' else decoded.gpu()
        normalized = self.norm(images)
        # The functions under test are binary, so the same tensor is
        # passed as both arguments.
        return self.func(normalized, normalized)
def validate_cpu_vs_gpu(gpu_fun, cpu_fun, num_outputs=1):
    """Assert the GPU implementation produces the same batches as the CPU
    reference across ITERS iterations."""
    gpu_pipe = PythonFunctionPipeline(gpu_fun, 'gpu', num_outputs)
    cpu_pipe = PythonFunctionPipeline(cpu_fun, 'cpu', num_outputs)
    test_utils.compare_pipelines(gpu_pipe, cpu_pipe, BATCH_SIZE, ITERS)
def arrays_arithmetic(in1, in2):
    """Return (in1 + in2, in1 - in2 / 2) for array-like operands."""
    total = in1 + in2
    half_diff = in1 - in2 / 2.
    return total, half_diff
def test_simple_arithm():
    """The same arithmetic function run on CPU (NumPy arrays) and GPU
    (CuPy arrays) must agree on both outputs."""
    validate_cpu_vs_gpu(arrays_arithmetic, arrays_arithmetic, num_outputs=2)
# CuPy elementwise kernel computing z = x*x - y*y; GPU counterpart of the
# NumPy `square_diff` below.
square_diff_kernel = cupy.ElementwiseKernel(
    'T x, T y',
    'T z',
    'z = x*x - y*y',
    'square_diff'
)
def square_diff(in1, in2):
    """Elementwise x*x - y*y (kept as two squarings to match the CUDA
    kernel's arithmetic bit-for-bit)."""
    sq1 = in1 * in1
    sq2 = in2 * in2
    return sq1 - sq2
def test_cupy_kernel():
    """Custom CuPy elementwise kernel vs. the NumPy expression."""
    validate_cpu_vs_gpu(square_diff_kernel, square_diff)


def test_builtin_func():
    """Library ufuncs: cupy.logaddexp vs numpy.logaddexp must agree."""
    validate_cpu_vs_gpu(cupy.logaddexp, np.logaddexp)
|
DALI-main
|
dali/test/python/test_gpu_python_function_operator.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.fn as fn
import nvidia.dali.plugin.tf as dali_tf
from nose_utils import raises
from test_utils_tensorflow import get_image_pipeline
@raises(ValueError, "Two structures don't have the same sequence length*length 3*length 2")
def test_different_num_shapes_dtypes():
    """Mismatched lengths of output_shapes vs output_dtypes must raise."""
    batch_size = 12
    num_threads = 4
    dataset_pipe, shapes, dtypes = get_image_pipeline(batch_size, num_threads, 'cpu')
    # Truncate dtypes so it no longer matches the 3-element shapes tuple.
    dtypes = tuple(dtypes[0:2])
    with tf.device('/cpu:0'):
        dali_tf.DALIDataset(
            pipeline=dataset_pipe,
            batch_size=batch_size,
            output_shapes=shapes,
            output_dtypes=dtypes,
            num_threads=num_threads)
@raises(RuntimeError, "some operators*cannot be used with TensorFlow Dataset API and DALIIterator")
def test_python_operator_not_allowed_in_tf_dataset_error():
    """Pipelines containing python_function are rejected by DALIDataset."""
    pipeline = Pipeline(1, 1, 0, exec_pipelined=False, exec_async=False)
    with pipeline:
        output = fn.python_function(function=lambda: np.zeros((3, 3, 3)))
        pipeline.set_outputs(output)
    # Bare parentheses: a single-output dataset accepts the plain shape
    # and dtype, not 1-tuples.
    shapes = ((1, 3, 3, 3))
    dtypes = (tf.float32)
    with tf.device('/cpu:0'):
        _ = dali_tf.DALIDataset(
            pipeline=pipeline,
            batch_size=1,
            output_shapes=shapes,
            output_dtypes=dtypes,
            num_threads=1,
            device_id=0)
|
DALI-main
|
dali/test/python/test_dali_tf_dataset.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import random
def make_batch_select_masks(batch_size,
                            npolygons_range=(1, 10),
                            nvertices_range=(3, 40),
                            vertex_ndim=2,
                            vertex_dtype=np.float32):
    """Generate random polygon metadata, vertices and mask selections.

    Returns (polygons, vertices, selected_masks) where, per sample:
      * polygons is an int32 array of shape [nmasks, 3] with rows
        (mask_id, vertex_start, vertex_end),
      * vertices is an array of shape [total_vertices, vertex_ndim],
      * selected_masks is a random non-empty subset of the mask ids.
    """
    polygons, vertices, selected_masks = [], [], []
    for _ in range(batch_size):
        nmasks = random.randint(*npolygons_range)
        # Pick a non-empty random subset of the mask ids. RNG calls are kept
        # in the same order as before for seed-reproducibility.
        subset = random.sample(list(range(nmasks)), random.randint(1, nmasks))
        selected_masks.append(np.array(subset, dtype=np.int32))
        meta = np.zeros([nmasks, 3], dtype=np.int32)
        total_vertices = 0
        for mask_id in range(nmasks):
            nvertices = random.randint(*nvertices_range)
            meta[mask_id, :] = (mask_id, total_vertices, total_vertices + nvertices)
            total_vertices += nvertices
        polygons.append(meta)
        if np.issubdtype(vertex_dtype, np.integer):
            # Integer vertices span the full range of the requested dtype.
            info = np.iinfo(vertex_dtype)
            vertices.append(
                np.random.randint(
                    low=info.min,
                    high=info.max,
                    size=(total_vertices, vertex_ndim),
                    dtype=vertex_dtype
                )
            )
        else:
            # Float vertices are uniform in [0, 1).
            vertices.append(
                np.array(
                    np.random.rand(total_vertices, vertex_ndim),
                    dtype=vertex_dtype
                )
            )
    return polygons, vertices, selected_masks
|
DALI-main
|
dali/test/python/segmentation_test_utils.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.ops as ops
import nvidia.dali.plugin.tf as dali_tf
import nvidia.dali.types as types
import os.path
import tensorflow as tf
from nvidia.dali.pipeline import Pipeline
from nose_utils import raises
from test_utils import get_dali_extra_path
try:
tf.compat.v1.disable_eager_execution()
except ModuleNotFoundError:
pass
test_data_root = get_dali_extra_path()
lmdb_folder = os.path.join(test_data_root, 'db', 'lmdb')
IMG_SIZE = 227
NUM_GPUS = 1
class CommonPipeline(Pipeline):
    """Base pipeline: mixed-backend JPEG decode, random shorter-edge resize,
    then crop-mirror-normalize on GPU."""

    def __init__(self, batch_size, num_threads, device_id):
        super().__init__(batch_size, num_threads, device_id)
        self.decode = ops.decoders.Image(device="mixed", output_type=types.RGB)
        self.resize = ops.Resize(device="gpu", interp_type=types.INTERP_LINEAR)
        # NOTE(review): `output_dtype` looks like the legacy spelling of
        # CropMirrorNormalize's `dtype` argument - confirm against the DALI
        # version in use.
        self.cmn = ops.CropMirrorNormalize(device="gpu",
                                           output_dtype=types.FLOAT,
                                           crop=(227, 227),
                                           mean=[128., 128., 128.],
                                           std=[1., 1., 1.])
        self.uniform = ops.random.Uniform(range=(0.0, 1.0))
        self.resize_rng = ops.random.Uniform(range=(256, 480))

    def base_define_graph(self, inputs, labels):
        images = self.decode(inputs)
        # Resize the shorter edge to a random length drawn from (256, 480).
        images = self.resize(images, resize_shorter=self.resize_rng())
        output = self.cmn(images,
                          crop_pos_x=self.uniform(),
                          crop_pos_y=self.uniform())
        return (output, labels.gpu())
class CaffeReadPipeline(CommonPipeline):
    """CommonPipeline fed from a sharded, shuffled Caffe LMDB reader."""

    def __init__(self, batch_size, num_threads, device_id, num_gpus):
        super().__init__(batch_size, num_threads, device_id)
        # Each device reads its own shard of the LMDB database.
        self.input = ops.readers.Caffe(path=lmdb_folder, random_shuffle=True,
                                       shard_id=device_id, num_shards=num_gpus)

    def define_graph(self):
        images, labels = self.input()
        return self.base_define_graph(images, labels)
def get_batch_dali(batch_size, pipe_type, label_type, num_gpus=1):
    """Build `num_gpus` DALI pipelines and wrap each in a DALIIterator TF op.

    Returns [images, labels]: per-GPU lists of the TF output tensors.

    Fix: the op-creation loop previously iterated over the module-level
    NUM_GPUS constant instead of the `num_gpus` parameter, so any call with
    num_gpus != NUM_GPUS would index `pipes` out of range or silently skip
    pipelines. Behavior for the default num_gpus=1 (== NUM_GPUS) is unchanged.
    """
    pipes = [pipe_type(batch_size=batch_size, num_threads=2, device_id=device_id,
                       num_gpus=num_gpus)
             for device_id in range(num_gpus)]
    daliop = dali_tf.DALIIterator()
    images = []
    labels = []
    for d in range(num_gpus):
        with tf.device('/gpu:%i' % d):
            image, label = daliop(pipeline=pipes[d],
                                  shapes=[(batch_size, 3, 227, 227), ()],
                                  dtypes=[tf.int32, label_type],
                                  device_id=d)
            images.append(image)
            labels.append(label)
    return [images, labels]
def test_dali_tf_op(pipe_type=CaffeReadPipeline, batch_size=16, iterations=32):
    """Run the DALIIterator TF op for a few iterations and sanity-check that
    the produced labels are integers within the expected range."""
    test_batch = get_batch_dali(batch_size, pipe_type, tf.int32)
    try:
        from tensorflow.compat.v1 import GPUOptions
        from tensorflow.compat.v1 import ConfigProto
        from tensorflow.compat.v1 import Session
    except ImportError:
        # Older TF versions don't have compat.v1 layer
        from tensorflow import GPUOptions
        from tensorflow import ConfigProto
        from tensorflow import Session
    # Cap TF's GPU memory so DALI can share the device.
    gpu_options = GPUOptions(per_process_gpu_memory_fraction=0.5)
    config = ConfigProto(gpu_options=gpu_options)
    with Session(config=config) as sess:
        for i in range(iterations):
            imgs, labels = sess.run(test_batch)
            # Testing correctness of labels
            for label in labels:
                # labels need to be integers
                assert np.equal(np.mod(label, 1), 0).all()
                assert (label >= 0).all()
                assert (label <= 999).all()
class PythonOperatorPipeline(Pipeline):
    """Single-stage pipeline whose only operator is a PythonFunction.

    Used to verify that python-function operators are rejected by the TF plugin.
    """

    def __init__(self):
        super().__init__(1, 1, 0, 0)
        self._py_func = ops.PythonFunction(function=lambda: np.zeros((3, 3, 3)))

    def define_graph(self):
        # The graph is just the python-function output.
        return self._py_func()
@raises(RuntimeError,
        glob='Note that some operators * cannot be used with TensorFlow Dataset API and DALIIterator') # noqa: E501
def test_python_operator_error():
    """Using a PythonFunction-based pipeline with DALIIterator must raise."""
    daliop = dali_tf.DALIIterator()
    pipe = PythonOperatorPipeline()
    with tf.device('/cpu:0'):
        _ = daliop(pipeline=pipe, shapes=[(1, 3, 3, 3)], dtypes=[tf.float32], device_id=0)
# ============================================================================
# File boundary (DALI-main repository): the code above is
# dali/test/python/test_dali_tf_plugin_run.py; a new test module follows.
# ============================================================================
# Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import nvidia.dali.ops as ops
import nvidia.dali.tfrecord as tfrec
import nvidia.dali.types as types
import os
from nvidia.dali.pipeline import Pipeline
from test_utils import get_dali_extra_path
class CommonPipeline(Pipeline):
    """Base pipeline decoding every encoded image twice: once on the GPU
    ("mixed" backend) and once on the CPU."""

    def __init__(self, batch_size, num_threads, device_id):
        super(CommonPipeline, self).__init__(batch_size, num_threads, device_id)
        self.decode_gpu = ops.decoders.Image(device="mixed", output_type=types.RGB)
        self.decode_host = ops.decoders.Image(device="cpu", output_type=types.RGB)

    def base_define_graph(self, inputs, labels):
        """Return (gpu_decoded, cpu_decoded, labels) for the given inputs."""
        on_gpu = self.decode_gpu(inputs)
        on_cpu = self.decode_host(inputs)
        return on_gpu, on_cpu, labels
class MXNetReaderPipeline(CommonPipeline):
    """Reads (image, label) pairs from an MXNet RecordIO data set."""
    def __init__(self, batch_size, num_threads, device_id, num_gpus, data_paths, dont_use_mmap):
        super(MXNetReaderPipeline, self).__init__(batch_size, num_threads, device_id)
        self.input = ops.readers.MXNet(path=data_paths[0], index_path=data_paths[1],
                                       shard_id=device_id, num_shards=num_gpus,
                                       dont_use_mmap=dont_use_mmap)
    def define_graph(self):
        images, labels = self.input(name="Reader")
        return self.base_define_graph(images, labels)
class CaffeReadPipeline(CommonPipeline):
    """Reads (image, label) pairs from a Caffe LMDB data set."""
    def __init__(self, batch_size, num_threads, device_id, num_gpus, data_paths, dont_use_mmap):
        super(CaffeReadPipeline, self).__init__(batch_size, num_threads, device_id)
        self.input = ops.readers.Caffe(path=data_paths[0], shard_id=device_id,
                                       num_shards=num_gpus, dont_use_mmap=dont_use_mmap)
    def define_graph(self):
        images, labels = self.input(name="Reader")
        return self.base_define_graph(images, labels)
class Caffe2ReadPipeline(CommonPipeline):
    """Reads (image, label) pairs from a Caffe2 LMDB data set."""
    def __init__(self, batch_size, num_threads, device_id, num_gpus, data_paths, dont_use_mmap):
        super(Caffe2ReadPipeline, self).__init__(batch_size, num_threads, device_id)
        self.input = ops.readers.Caffe2(path=data_paths[0], shard_id=device_id,
                                        num_shards=num_gpus, dont_use_mmap=dont_use_mmap)
    def define_graph(self):
        images, labels = self.input(name="Reader")
        return self.base_define_graph(images, labels)
class FileReadPipeline(CommonPipeline):
    """Reads images from a directory tree; labels derive from subdirectories."""
    def __init__(self, batch_size, num_threads, device_id, num_gpus, data_paths, dont_use_mmap):
        super(FileReadPipeline, self).__init__(batch_size, num_threads, device_id)
        self.input = ops.readers.File(file_root=data_paths[0], shard_id=device_id,
                                      num_shards=num_gpus, dont_use_mmap=dont_use_mmap)
    def define_graph(self):
        images, labels = self.input(name="Reader")
        return self.base_define_graph(images, labels)
class TFRecordPipeline(CommonPipeline):
    """Reads TFRecord shards (with pre-built index files) matching glob patterns."""
    def __init__(self, batch_size, num_threads, device_id, num_gpus, data_paths, dont_use_mmap):
        super(TFRecordPipeline, self).__init__(batch_size, num_threads, device_id)
        # data_paths[0]/[1] are glob patterns for record and index files; sort so
        # that record and index lists stay aligned.
        tfrecord = sorted(glob.glob(data_paths[0]))
        tfrecord_idx = sorted(glob.glob(data_paths[1]))
        self.input = ops.readers.TFRecord(
            path=tfrecord,
            index_path=tfrecord_idx,
            shard_id=device_id,
            num_shards=num_gpus,
            features={
                "image/encoded": tfrec.FixedLenFeature((), tfrec.string, ""),
                "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1)
            },
            dont_use_mmap=dont_use_mmap)
    def define_graph(self):
        inputs = self.input(name="Reader")
        images = inputs["image/encoded"]
        labels = inputs["image/class/label"]
        return self.base_define_graph(images, labels)
class COCOReaderPipeline(CommonPipeline):
    """Reads COCO images + annotations; bounding boxes are read but unused."""
    def __init__(self, batch_size, num_threads, device_id, num_gpus, data_paths, dont_use_mmap):
        super(COCOReaderPipeline, self).__init__(batch_size, num_threads, device_id)
        self.input = ops.readers.COCO(file_root=data_paths[0], annotations_file=data_paths[1],
                                      shard_id=device_id, num_shards=num_gpus,
                                      dont_use_mmap=dont_use_mmap)
    def define_graph(self):
        # bb (bounding boxes) are intentionally discarded by this benchmark.
        images, bb, labels = self.input(name="Reader")
        return self.base_define_graph(images, labels)
test_data = {
FileReadPipeline: [["/data/imagenet/train-jpeg"],
["/data/imagenet/val-jpeg"]],
MXNetReaderPipeline: [["/data/imagenet/train-480-val-256-recordio/train.rec",
"/data/imagenet/train-480-val-256-recordio/train.idx"],
["/data/imagenet/train-480-val-256-recordio/val.rec",
"/data/imagenet/train-480-val-256-recordio/val.idx"]],
CaffeReadPipeline: [["/data/imagenet/train-lmdb-256x256"],
["/data/imagenet/val-lmdb-256x256"]],
Caffe2ReadPipeline: [["/data/imagenet/train-c2lmdb-480"],
["/data/imagenet/val-c2lmdb-256"]],
TFRecordPipeline: [["/data/imagenet/train-val-tfrecord-480/train-*",
"/data/imagenet/train-val-tfrecord-480.idx/train-*"]],
COCOReaderPipeline: [["/data/coco/coco-2017/coco2017/train2017",
"/data/coco/coco-2017/coco2017/annotations/instances_train2017.json"],
["/data/coco/coco-2017/coco2017/val2017",
"/data/coco/coco-2017/coco2017/annotations/instances_val2017.json"]]
}
data_root = get_dali_extra_path()
small_test_data = {
FileReadPipeline: [[os.path.join(data_root, "db/single/jpeg/")]],
MXNetReaderPipeline: [[os.path.join(data_root, "db/recordio/train.rec"),
os.path.join(data_root, "db/recordio/train.idx")]],
CaffeReadPipeline: [[os.path.join(data_root, "db/lmdb")]],
Caffe2ReadPipeline: [[os.path.join(data_root, "db/c2lmdb")]],
TFRecordPipeline: [[os.path.join(data_root, "db/tfrecord/train"),
os.path.join(data_root, "db/tfrecord/train.idx")]],
COCOReaderPipeline: [[os.path.join(data_root, "db/coco/images"),
os.path.join(data_root, "db/coco/instances.json")]]
}
# CLI options for the RN50 data-set decode benchmark.
parser = argparse.ArgumentParser(description='ImageDecoder RN50 dataset test')
parser.add_argument('-g', '--gpus', default=1, type=int, metavar='N',
                    help='number of GPUs (default: 1)')
parser.add_argument('-b', '--batch', default=2048, type=int, metavar='N',
                    help='batch size (default: 2048)')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('-s', '--small', action='store_true',
                    help='use small dataset, DALI_EXTRA_PATH needs to be set')
parser.add_argument('-n', '--no-mmap', action='store_true',
                    help="don't mmap files from data set")
args = parser.parse_args()
N = args.gpus  # number of GPUs
BATCH_SIZE = args.batch  # batch size
LOG_INTERVAL = args.print_freq
SMALL_DATA_SET = args.small
USE_MMAP = not args.no_mmap
# Typo fix in status message: "loging" -> "logging".
print(f"GPUs: {N}, batch: {BATCH_SIZE}, logging interval: {LOG_INTERVAL}, "
      f"small dataset: {SMALL_DATA_SET}, use mmap: {USE_MMAP}")
if SMALL_DATA_SET:
    test_data = small_test_data
# Main benchmark loop: for every reader pipeline class and every data set
# registered for it, shard the data across N GPUs and drain one full epoch.
for pipe_name in test_data.keys():
    data_set_len = len(test_data[pipe_name])
    for i, data_set in enumerate(test_data[pipe_name]):
        # One pipeline per GPU, each reading its own shard.
        pipes = [pipe_name(batch_size=BATCH_SIZE, num_threads=4, device_id=n, num_gpus=N,
                           data_paths=data_set, dont_use_mmap=not USE_MMAP) for n in range(N)]
        [pipe.build() for pipe in pipes]
        iters = pipes[0].epoch_size("Reader")
        # All shards must report the same epoch size.
        assert all(pipe.epoch_size("Reader") == iters for pipe in pipes)
        # Manual ceil-division: epoch size -> batches, then batches -> per-GPU
        # iterations, rounding up so partial batches/shards still run.
        iters_tmp = iters
        iters = iters // BATCH_SIZE
        if iters_tmp != iters * BATCH_SIZE:
            iters += 1
        iters_tmp = iters
        iters = iters // N
        if iters_tmp != iters * N:
            iters += 1
        print("RUN {0}/{1}: {2}".format(i + 1, data_set_len, pipe_name.__name__))
        print(data_set)
        for j in range(iters):
            # Schedule all pipelines first, then collect outputs, to overlap work.
            for pipe in pipes:
                pipe.schedule_run()
            for pipe in pipes:
                pipe.outputs()
            if j % LOG_INTERVAL == 0:
                print(pipe_name.__name__, j + 1, "/", iters)
        print("OK {0}/{1}: {2}".format(i + 1, data_set_len, pipe_name.__name__))
# ============================================================================
# File boundary (DALI-main repository): the code above is
# dali/test/python/test_data_containers.py; a new test module follows.
# ============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import Pipeline, pipeline_def
from nose.tools import nottest
from nose_utils import raises
import nvidia.dali.fn as fn
from test_utils import get_dali_extra_path, compare_pipelines
import os
# Shared test configuration for the pipeline_def decorator tests.
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
N_ITER = 2  # iterations per pipeline comparison
max_batch_size = 4
num_threads = 4
device_id = 0
def reference_pipeline(flip_vertical, flip_horizontal, ref_batch_size=max_batch_size):
    """Manually-built Pipeline equivalent to the @pipeline_def variants.

    Outputs (flipped_image, original_image) for comparison.
    """
    ref_pipe = Pipeline(ref_batch_size, num_threads, device_id)
    with ref_pipe:
        encoded, _ = fn.readers.file(file_root=images_dir)
        decoded = fn.decoders.image(encoded)
        flipped = fn.flip(decoded, horizontal=flip_horizontal, vertical=flip_vertical)
        ref_pipe.set_outputs(flipped, decoded)
    return ref_pipe
@nottest  # pipeline_def works with other decorators too
@pipeline_def(batch_size=max_batch_size, num_threads=num_threads, device_id=device_id)
def pipeline_static(flip_vertical, flip_horizontal):
    """Decorated pipeline with Pipeline kwargs fixed at decoration time."""
    data, _ = fn.readers.file(file_root=images_dir)
    img = fn.decoders.image(data)
    flipped = fn.flip(img, horizontal=flip_horizontal, vertical=flip_vertical)
    return flipped, img
@nottest
@pipeline_def
def pipeline_runtime(flip_vertical, flip_horizontal):
    """Decorated pipeline whose Pipeline kwargs are supplied at call time."""
    data, _ = fn.readers.file(file_root=images_dir)
    img = fn.decoders.image(data)
    flipped = fn.flip(img, horizontal=flip_horizontal, vertical=flip_vertical)
    return flipped, img
@nottest
def test_pipeline_static(flip_vertical, flip_horizontal):
    """Decoration-time-configured pipeline must match the reference pipeline."""
    put_args = pipeline_static(flip_vertical, flip_horizontal)
    ref = reference_pipeline(flip_vertical, flip_horizontal)
    compare_pipelines(put_args, ref, batch_size=max_batch_size, N_iterations=N_ITER)
@nottest
def test_pipeline_runtime(flip_vertical, flip_horizontal):
    """Call-time-configured pipeline must match the reference pipeline."""
    put_combined = pipeline_runtime(flip_vertical, flip_horizontal, batch_size=max_batch_size,
                                    num_threads=num_threads, device_id=device_id)
    ref = reference_pipeline(flip_vertical, flip_horizontal)
    compare_pipelines(put_combined, ref, batch_size=max_batch_size, N_iterations=N_ITER)
@nottest
def test_pipeline_override(flip_vertical, flip_horizontal, batch_size):
    """Call-time kwargs must override decoration-time kwargs (batch_size here)."""
    put_combined = pipeline_static(flip_vertical, flip_horizontal, batch_size=batch_size,
                                   num_threads=num_threads, device_id=device_id)
    ref = reference_pipeline(flip_vertical, flip_horizontal, ref_batch_size=batch_size)
    compare_pipelines(put_combined, ref, batch_size=batch_size, N_iterations=N_ITER)
def test_pipeline_decorator():
    """Yield parametrized checks for @pipeline_def: constant flip flags in every
    combination, plus DataNode (random coin-flip) flip arguments."""
    for vert, hori in ((v, h) for v in (0, 1) for h in (0, 1)):
        yield test_pipeline_static, vert, hori
        yield test_pipeline_runtime, vert, hori
        yield test_pipeline_override, vert, hori, 5
    # Flip flags supplied as graph nodes rather than Python constants; a fresh
    # pair of nodes is created for each yielded case.
    yield test_pipeline_runtime, fn.random.coin_flip(seed=123), fn.random.coin_flip(seed=234)
    yield test_pipeline_static, fn.random.coin_flip(seed=123), fn.random.coin_flip(seed=234)
def test_duplicated_argument():
    """A pipeline-function argument that shadows a Pipeline constructor kwarg
    (here `max_streams`) must be routed to the function, not the Pipeline."""
    @pipeline_def(batch_size=max_batch_size, num_threads=num_threads, device_id=device_id)
    def ref_pipeline(val):
        data, _ = fn.readers.file(file_root=images_dir)
        return data + val
    @pipeline_def(batch_size=max_batch_size, num_threads=num_threads, device_id=device_id)
    def pipeline_duplicated_arg(max_streams):
        data, _ = fn.readers.file(file_root=images_dir)
        return data + max_streams
    pipe = pipeline_duplicated_arg(max_streams=42)
    # Pipeline's own max_streams must keep its default (-1), untouched by the 42.
    assert pipe._max_streams == -1
    ref = ref_pipeline(42)
    compare_pipelines(pipe, ref, batch_size=max_batch_size, N_iterations=N_ITER)
@pipeline_def
def pipeline_kwargs(arg1, arg2, *args, **kwargs):
    """Pipeline whose signature uses **kwargs — unsupported by pipeline_def."""
    pass
@raises(TypeError, regex="\\*\\*kwargs.*not allowed")
def test_kwargs_exception():
    # Invoking a **kwargs pipeline must raise TypeError.
    pipeline_kwargs(arg1=1, arg2=2, arg3=3)
def test_is_pipeline_def():
    """Every form of the decorator — bare, empty-call, and configured — must
    mark the wrapped function with the `_is_pipeline_def` attribute."""
    @pipeline_def
    def pipe():
        return 42

    @pipeline_def()
    def pipe_unconf():
        return 42

    @pipeline_def(max_batch_size=1, num_threads=1, device_id=0)
    def pipe_conf():
        return 42

    for decorated in (pipe, pipe_unconf, pipe_conf):
        assert getattr(decorated, '_is_pipeline_def', False)
# ============================================================================
# File boundary (DALI-main repository): the code above is
# dali/test/python/test_pipeline_decorator.py; a new test module follows.
# ============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import os
from itertools import product
from nvidia.dali.pipeline.experimental import pipeline_def
from time import time
from test_utils import get_dali_extra_path
@pipeline_def(device_id=0)
def rn50_pipeline(data_path):
    """RN50-style pipeline: CPU decode + fused fast_resize_crop_mirror,
    normalization on GPU."""
    uniform = fn.random.uniform(range=(0., 1.), shape=2)
    resize_uniform = fn.random.uniform(range=(256., 480.))
    mirror = fn.random.coin_flip(probability=0.5)
    jpegs, _ = fn.readers.file(file_root=data_path)
    images = fn.decoders.image(jpegs, output_type=types.RGB)
    resized_images = fn.fast_resize_crop_mirror(images, crop=(224, 224), crop_pos_x=uniform[0],
                                                crop_pos_y=uniform[1], mirror=mirror,
                                                resize_shorter=resize_uniform)
    output = fn.crop_mirror_normalize(resized_images.gpu(), device='gpu', dtype=types.FLOAT16,
                                      mean=[128., 128., 128.], std=[1., 1., 1.])
    return output
@pipeline_def(device_id=0)
def rn50_pipeline_2(data_path):
    """RN50-style pipeline variant: mixed (GPU) decode, separate resize, and
    crop/mirror folded into crop_mirror_normalize."""
    uniform = fn.random.uniform(range=(0., 1.), shape=2)
    resize_uniform = fn.random.uniform(range=(256., 480.))
    mirror = fn.random.coin_flip(probability=0.5)
    jpegs, _ = fn.readers.file(file_root=data_path)
    images = fn.decoders.image(jpegs, device='mixed', output_type=types.RGB)
    resized_images = fn.resize(images, device='gpu', interp_type=types.INTERP_LINEAR,
                               resize_shorter=resize_uniform)
    output = fn.crop_mirror_normalize(resized_images, device='gpu', dtype=types.FLOAT16,
                                      crop=(224, 224), mean=[128., 128., 128.], std=[1., 1., 1.],
                                      mirror=mirror, crop_pos_x=uniform[0], crop_pos_y=uniform[1])
    return output
def run_benchmark(pipe_fun, batch_size, num_threads, num_samples, debug, data_path):
    """Build a pipeline via `pipe_fun` and time `num_samples // batch_size` runs.

    Returns:
        (total_run_time, build_plus_first_iter_time, remaining_iter_times):
        total time after build, the first timing delta (includes pipeline
        creation and build), and an array of per-iteration deltas.
    """
    iterations = num_samples // batch_size
    stamps = np.empty(iterations + 1)
    stamps[0] = time()
    pipe = pipe_fun(data_path, batch_size=batch_size, num_threads=num_threads, debug=debug)
    pipe.build()
    built_at = time()
    for it in range(iterations):
        pipe.run()
        stamps[it + 1] = time()
    total = stamps[-1] - built_at
    deltas = np.diff(stamps)
    return total, deltas[0], deltas[1:]
def test_rn50_benchmark(pipe_fun=rn50_pipeline, batch_size=8, num_threads=2, num_samples=256,
                        data_path=None, save_df=None):
    """Benchmark `pipe_fun` in standard and debug mode; print a comparison.

    If `save_df` is given, returns it with the new results appended;
    otherwise returns None.
    """
    if not data_path:
        data_path = os.path.join(get_dali_extra_path(), 'db/single/jpeg')
    print(f'num_threads: {num_threads}, batch_size: {batch_size}')
    full_stand, build_stand, times_stand = run_benchmark(pipe_fun, batch_size, num_threads,
                                                         num_samples, False, data_path)
    # Skip the first timed iteration when averaging (warm-up).
    iter_time_stand = np.mean(times_stand[1:]) / batch_size
    avg_speed_stand = num_samples / full_stand
    print(
        f'Stand pipeline --- time: {full_stand:8.5f} [s] --- '
        f'build + 1st iter time: {build_stand:.5f} [s] --- '
        f'avg iter time per sample: {iter_time_stand:7.5f} [s] --- '
        f'avg speed: {avg_speed_stand:8.3f} [img/s]')
    full_debug, build_debug, times_debug = run_benchmark(pipe_fun, batch_size, num_threads,
                                                         num_samples, True, data_path)
    iter_time_debug = np.mean(times_debug[1:]) / batch_size
    avg_speed_debug = num_samples / full_debug
    print(
        f'Debug pipeline --- time: {full_debug:8.5f} [s] --- '
        f'build + 1st iter time: {build_debug:.5f} [s] --- '
        f'avg iter time per sample: {iter_time_debug:7.5f} [s] --- '
        f'avg speed: {avg_speed_debug:8.3f} [img/s]')
    if save_df is not None:
        # NOTE(review): relies on `pd` (pandas) being imported at module scope;
        # the import only happens inside the __main__ block when --save-dir is
        # given — confirm callers passing save_df have pandas imported.
        df = pd.DataFrame({'type': ['standard_sync', 'debug_old'],
                           'batch_size': batch_size,
                           'time': [full_stand, full_debug],
                           'iter_time': [iter_time_stand, iter_time_debug],
                           'avg_speed': [avg_speed_stand, avg_speed_debug]})
        return pd.concat([save_df, df])
    return None
def parse_args():
    """Parse benchmark CLI options: batch sizes, thread counts, sample count
    and data/result paths. Defaults appear in --help output."""
    cli = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument('--batch-sizes', nargs='+', type=int, default=[1, 4, 8, 32, 64, 128],
                     help='List of batch sizes to run')
    cli.add_argument('--thread-counts', nargs='+', type=int, default=[1, 2, 4, 8],
                     help='List of thread counts')
    cli.add_argument('--num-samples', type=int, default=2048,
                     help='Number of samples')
    cli.add_argument('--data-path', type=str,
                     help='Directory path of training dataset')
    cli.add_argument('--save-dir', type=str,
                     help='Directory where to save results')
    return cli.parse_args()
if __name__ == '__main__':
    args = parse_args()
    df = None
    # Benchmark every (pipeline, thread-count) combination.
    for pipe_fun, num_threads in product([rn50_pipeline, rn50_pipeline_2], args.thread_counts):
        if args.save_dir is not None:
            import pandas as pd
            save_file = os.path.join(args.save_dir,
                                     f'bench_{pipe_fun.__name__}_threads_{num_threads}.csv')
            if os.path.isfile(save_file):
                df = pd.read_csv(save_file)
            else:
                df = pd.DataFrame(columns=['type', 'batch_size', 'time', 'iter_time', 'avg_speed'])
        for batch_size in args.batch_sizes:
            # BUG FIX: benchmark the pipeline selected by the loop (`pipe_fun`),
            # not unconditionally `rn50_pipeline_2` — previously results saved
            # under rn50_pipeline's file name actually measured rn50_pipeline_2.
            df = test_rn50_benchmark(pipe_fun, batch_size, num_threads, args.num_samples,
                                     args.data_path, df)
        if df is not None:
            df.to_csv(save_file, index=False)
# ============================================================================
# File boundary (DALI-main repository): the code above is
# dali/test/python/test_pipeline_debug_resnet50.py; a new test module follows.
# ============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import re
from functools import reduce
from nvidia.dali import fn
from nvidia.dali import tensors
from nvidia.dali import types
from nvidia.dali.experimental import eager
from nvidia.dali.pipeline import Pipeline, pipeline_def
from nvidia.dali._utils.eager_utils import _slice_tensorlist
from test_dali_cpu_only_utils import (pipeline_arithm_ops_cpu, setup_test_nemo_asr_reader_cpu,
setup_test_numpy_reader_cpu)
from test_detection_pipeline import coco_anchors
from test_utils import check_batch, get_dali_extra_path, get_files, module_functions
from segmentation_test_utils import make_batch_select_masks
from webdataset_base import generate_temp_index_file as generate_temp_wds_index
""" Tests of coverage of eager operators. For each operator results from standard pipeline and
eager version are compared across a couple of iterations.
If you have added a new operator you should add a test here for an eager version of it. Also make
sure you have correctly classified the operator in `dali/python/nvidia/dali/_utils/eager_utils.py`
as stateless, stateful or iterator.
"""
# Paths into the DALI_extra test-data checkout used by the eager-op tests.
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
audio_files = get_files(os.path.join('db', 'audio', 'wav'), 'wav')
caffe_dir = os.path.join(data_root, 'db', 'lmdb')
caffe2_dir = os.path.join(data_root, 'db', 'c2lmdb')
recordio_dir = os.path.join(data_root, 'db', 'recordio')
webdataset_dir = os.path.join(data_root, 'db', 'webdataset')
coco_dir = os.path.join(data_root, 'db', 'coco', 'images')
coco_annotation = os.path.join(data_root, 'db', 'coco', 'instances.json')
sequence_dir = os.path.join(data_root, 'db', 'sequence', 'frames')
video_files = get_files(os.path.join('db', 'video', 'vfr'), 'mp4')
# Shared RNG and dataset dimensions for the synthetic inputs below.
rng = np.random.default_rng()
batch_size = 2
data_size = 10
sample_shape = [20, 20, 3]
# Sample data of image-like shape and type, used in many tests to avoid multiple object creation.
data = [[rng.integers(0, 255, size=sample_shape, dtype=np.uint8)
         for _ in range(batch_size)] for _ in range(data_size)]
# Sample data for audio operators.
audio_data = [[rng.random(size=[200], dtype=np.float32)
               for _ in range(batch_size)] for _ in range(data_size)]
# Sample data with single non-batch dimension.
flat_data = [[rng.integers(0, 255, size=[200], dtype=np.uint8)
              for _ in range(batch_size)] for _ in range(data_size)]
def get_tl(data, layout='HWC'):
    """Wrap `data` in a TensorListCPU; the default 'HWC' layout is dropped
    (replaced with '') when the batched array is not 4-dimensional."""
    if layout is None or (layout == 'HWC' and data.ndim != 4):
        layout = ''
    return tensors.TensorListCPU(data, layout=layout)
def get_data(i):
    """ Callback function to access data (numpy array) at given index. Used for generating inputs
    for standard pipelines.
    """
    return data[i]
def get_data_eager(i, layout='HWC'):
    """ Callback function to access data at given index returned as TensorListCPU. Used for
    generating inputs for eager operators.
    """
    return get_tl(np.array(get_data(i)), layout)
def get_multi_data_eager(n):
    """ Used for generating multiple inputs for eager operators.

    Returns a callback producing `n` copies of the batch at the given index.
    """
    def get(i, _):
        # Second argument (layout) is ignored; the default 'HWC' is used.
        return tuple(get_data_eager(i) for _ in range(n))
    return get
class PipelineInput:
    """ Class for generating inputs for pipeline.

    Args:
        pipe_fun: pipeline definition function.
        args: arguments for the pipeline creation.
        kwargs: possible keyword arguments used inside pipeline definition function.
    """
    def __init__(self, pipe_fun, *args, **kwargs) -> None:
        # NOTE(review): `kwargs` is deliberately forwarded as ONE positional
        # dict (not `**kwargs`): pipeline functions used with this helper
        # (e.g. `file_reader_pipeline(kwargs)`) declare a single positional
        # `kwargs` parameter and unpack the dict themselves. Confirm before
        # "fixing" this to `**kwargs`.
        if kwargs:
            self.pipe = pipe_fun(*args, kwargs)
        else:
            self.pipe = pipe_fun(*args)
        self.pipe.build()
    def __call__(self, *_):
        # Ignores index/layout arguments so it can stand in for an eager source.
        return self.pipe.run()
class GetData:
    """Serves one dataset to both code paths: raw batches for pipelines
    (`fn_source`) and TensorListCPU batches for eager operators
    (`eager_source`)."""

    def __init__(self, data) -> None:
        self.data = data

    def fn_source(self, i):
        """Return the raw batch at index `i`."""
        return self.data[i]

    def eager_source(self, i, layout='HWC'):
        """Return the batch at index `i` wrapped in a TensorListCPU."""
        batch = np.array(self.fn_source(i))
        return get_tl(batch, layout)
def get_ops(op_path, fn_op=None, eager_op=None, eager_module=eager):
    """Resolve a dot-separated `op_path` in both the `fn` namespace and the
    eager module; explicitly provided operators are returned unchanged."""
    segments = op_path.split('.')
    if fn_op is None:
        fn_op = reduce(getattr, segments, fn)
    if eager_op is None:
        eager_op = reduce(getattr, segments, eager_module)
    return fn_op, eager_op
def compare_eager_with_pipeline(pipe, eager_op, *, eager_source=get_data_eager, layout='HWC',
                                batch_size=batch_size, N_iterations=5, **kwargs):
    """ Compares outputs from standard pipeline `pipe` and eager operator `eager_op` across
    `N_iterations`.
    """
    pipe.build()
    for i in range(N_iterations):
        input_tl = eager_source(i, layout)
        out_fn = pipe.run()
        # An eager source may return a single TensorList, several of them, or
        # nothing (no-input operators, which then need an explicit batch_size).
        if isinstance(input_tl, (tuple, list)):
            if len(input_tl):
                out_eager = eager_op(*input_tl, **kwargs)
            else:
                out_eager = eager_op(batch_size=batch_size, **kwargs)
        else:
            out_eager = eager_op(input_tl, **kwargs)
        if not isinstance(out_eager, (tuple, list)):
            out_eager = (out_eager,)
        assert len(out_fn) == len(out_eager)
        for tensor_out_fn, tensor_out_eager in zip(out_fn, out_eager):
            assert type(tensor_out_fn) is type(tensor_out_eager)
            if tensor_out_fn.dtype == types.BOOL:
                # check_batch does not handle BOOL — compare element-wise.
                for t_fn, t_eager in zip(tensor_out_fn, tensor_out_eager):
                    assert np.array_equal(t_fn, t_eager)
            else:
                check_batch(tensor_out_fn, tensor_out_eager, batch_size)
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def single_op_pipeline(op, kwargs, source=get_data, layout='HWC'):
    """Pipeline applying a single operator `op` to externally-sourced data."""
    data = fn.external_source(source=source, layout=layout)
    out = op(data, **kwargs)
    if isinstance(out, list):
        out = tuple(out)
    return out
def check_single_input(op_path, *, pipe_fun=single_op_pipeline, fn_source=get_data, fn_op=None,
                       eager_source=get_data_eager, eager_op=None, layout='HWC', **kwargs):
    """Build the fn-based pipeline for `op_path` and compare with its eager twin."""
    fn_op, eager_op = get_ops(op_path, fn_op, eager_op)
    pipe = pipe_fun(fn_op, kwargs, source=fn_source, layout=layout)
    compare_eager_with_pipeline(pipe, eager_op, eager_source=eager_source, layout=layout, **kwargs)
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def no_input_pipeline(op, kwargs):
    """Pipeline built from a single no-input operator (e.g. a generator op)."""
    out = op(**kwargs)
    if isinstance(out, list):
        out = tuple(out)
    return out
def no_input_source(*_):
    # Empty eager source: signals compare_eager_with_pipeline to call the eager
    # operator with batch_size only.
    return ()
def check_no_input(op_path, *, fn_op=None, eager_op=None, batch_size=batch_size, N_iterations=5,
                   **kwargs):
    """Compare a no-input fn operator with its eager counterpart."""
    fn_op, eager_op = get_ops(op_path, fn_op, eager_op)
    pipe = no_input_pipeline(fn_op, kwargs)
    compare_eager_with_pipeline(pipe, eager_op, eager_source=no_input_source,
                                batch_size=batch_size, N_iterations=N_iterations, **kwargs)
def prep_stateful_operators(op_path):
    """Resolve fn/eager variants of a stateful (random) operator with seeds set
    up so both produce the same sequence."""
    # Replicating seed that will be used inside rng_state, that way we expect fn and eager
    # operators to return same results.
    seed_upper_bound = (1 << 31) - 1
    seed = rng.integers(seed_upper_bound)
    fn_seed = np.random.default_rng(seed).integers(seed_upper_bound)
    eager_state = eager.rng_state(seed)
    fn_op, eager_op = get_ops(op_path, eager_module=eager_state)
    return fn_op, eager_op, fn_seed
def check_single_input_stateful(op_path, pipe_fun=single_op_pipeline, fn_source=get_data,
                                fn_op=None, eager_source=get_data_eager, eager_op=None,
                                layout='HWC', **kwargs):
    """Like check_single_input, but for stateful (seeded) operators."""
    fn_op, eager_op, fn_seed = prep_stateful_operators(op_path)
    # Seed only the fn pipeline; the eager op draws its seed from rng_state.
    kwargs['seed'] = fn_seed
    pipe = pipe_fun(fn_op, kwargs, source=fn_source, layout=layout)
    kwargs.pop('seed', None)
    compare_eager_with_pipeline(pipe, eager_op, eager_source=eager_source, layout=layout, **kwargs)
def check_no_input_stateful(op_path, *, fn_op=None, eager_op=None, batch_size=batch_size,
                            N_iterations=5, **kwargs):
    """Like check_no_input, but for stateful (seeded) operators."""
    fn_op, eager_op, fn_seed = prep_stateful_operators(op_path)
    kwargs['seed'] = fn_seed
    pipe = no_input_pipeline(fn_op, kwargs)
    kwargs.pop('seed', None)
    compare_eager_with_pipeline(pipe, eager_op, eager_source=no_input_source,
                                batch_size=batch_size, N_iterations=N_iterations, **kwargs)
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def reader_pipeline(op, kwargs):
    """Pipeline wrapping a reader operator; pads the last batch so epoch sizes
    line up with the eager iterator."""
    out = op(pad_last_batch=True, **kwargs)
    if isinstance(out, list):
        out = tuple(out)
    return out
def check_reader(op_path, *, fn_op=None, eager_op=None, batch_size=batch_size,
                 N_iterations=2, **kwargs):
    """Compare a fn reader pipeline against the eager reader iterator over
    `N_iterations` full epochs."""
    fn_op, eager_op = get_ops(op_path, fn_op, eager_op)
    pipe = reader_pipeline(fn_op, kwargs)
    pipe.build()
    iter_eager = eager_op(batch_size=batch_size, **kwargs)
    for _ in range(N_iterations):
        for i, out_eager in enumerate(iter_eager):
            out_fn = pipe.run()
            if not isinstance(out_eager, (tuple, list)):
                out_eager = (out_eager,)
            assert len(out_fn) == len(out_eager)
            for tensor_out_fn, tensor_out_eager in zip(out_fn, out_eager):
                if i == len(iter_eager) - 1:
                    # Last batch: the padded fn output may be longer — trim it
                    # to the eager batch length before comparing.
                    tensor_out_fn = _slice_tensorlist(tensor_out_fn, len(tensor_out_eager))
                assert type(tensor_out_fn) is type(tensor_out_eager)
                check_batch(tensor_out_fn, tensor_out_eager, len(tensor_out_eager))
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def file_reader_pipeline(kwargs):
    """File-reader pipeline; `kwargs` is a positional dict of reader arguments
    (see PipelineInput for why)."""
    data, _ = fn.readers.file(**kwargs)
    return data
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def reader_op_pipeline(op, kwargs, source=None, layout=None):
    """Pipeline feeding a file reader's encoded output into operator `op`.
    `source` is the file root; `layout` is accepted for signature parity."""
    if source is None:
        raise RuntimeError('No source for file reader.')
    data, _ = fn.readers.file(file_root=source)
    out = op(data, **kwargs)
    if isinstance(out, list):
        out = tuple(out)
    return out
# --- Per-operator eager coverage tests (image processing) -------------------
def test_image_decoder():
    check_single_input('decoders.image', pipe_fun=reader_op_pipeline, fn_source=images_dir,
                       eager_source=PipelineInput(file_reader_pipeline, file_root=images_dir),
                       output_type=types.RGB)
def test_rotate():
    check_single_input('rotate', angle=25)
def test_brightness_contrast():
    check_single_input('brightness_contrast')
def test_hue():
    check_single_input('hue')
def test_brightness():
    check_single_input('brightness')
def test_contrast():
    check_single_input('contrast')
def test_hsv():
    check_single_input('hsv')
def test_color_twist():
    check_single_input('color_twist')
def test_saturation():
    check_single_input('saturation')
def test_shapes():
    check_single_input('shapes')
def test_crop():
    check_single_input('crop', crop=(5, 5))
def test_color_space_coversion():
    # NOTE(review): function name contains a typo ("coversion"); kept as-is
    # since test names are discovered by the runner.
    check_single_input('color_space_conversion', image_type=types.BGR, output_type=types.RGB)
def test_cast():
    check_single_input('cast', dtype=types.INT32)
def test_resize():
    check_single_input('resize', resize_x=50, resize_y=50)
def test_per_frame():
    check_single_input('per_frame', replace=True)
def test_gaussian_blur():
    check_single_input('gaussian_blur', window_size=5)
def test_laplacian():
    check_single_input('laplacian', window_size=5)
def test_crop_mirror_normalize():
    check_single_input('crop_mirror_normalize')
def test_flip():
    check_single_input('flip', horizontal=True)
def test_jpeg_compression_distortion():
    check_single_input('jpeg_compression_distortion', quality=10)
def test_image_decoder_crop_device():
    check_single_input('decoders.image_crop', pipe_fun=reader_op_pipeline, fn_source=images_dir,
                       eager_source=PipelineInput(file_reader_pipeline, file_root=images_dir),
                       output_type=types.RGB, crop=(10, 10))
# --- Per-operator eager coverage tests (shape, geometry, audio) -------------
def test_reshape():
    # Halve height, double width — same element count as sample_shape.
    new_shape = sample_shape.copy()
    new_shape[0] //= 2
    new_shape[1] *= 2
    check_single_input('reshape', shape=new_shape)
def test_reinterpret():
    check_single_input('reinterpret', rel_shape=[0.5, 1, -1])
def test_water():
    check_single_input('water')
def test_sphere():
    check_single_input('sphere')
def test_erase():
    check_single_input('erase', anchor=[0.3], axis_names='H',
                       normalized_anchor=True, shape=[0.1], normalized_shape=True)
def test_expand_dims():
    check_single_input('expand_dims', axes=1, new_axis_names='Z')
def test_coord_transform():
    M = [0, 0, 1,
         0, 1, 0,
         1, 0, 0]
    check_single_input('coord_transform', M=M, dtype=types.UINT8)
def test_grid_mask():
    check_single_input('grid_mask', tile=51, ratio=0.38158387, angle=2.6810782)
def test_multi_paste():
    check_single_input('multi_paste', in_ids=np.array([0, 1]), output_size=sample_shape)
def test_nonsilent_region():
    # Leading zeros make the first samples "silent" so the op has work to do.
    data = [[rng.integers(0, 255, size=[200], dtype=np.uint8)
             for _ in range(batch_size)]] * data_size
    data[0][0][0] = 0
    data[0][1][0] = 0
    data[0][1][1] = 0
    get_data = GetData(data)
    check_single_input('nonsilent_region', fn_source=get_data.fn_source,
                       eager_source=get_data.eager_source, layout='')
def test_preemphasis_filter():
    get_data = GetData(audio_data)
    check_single_input('preemphasis_filter', fn_source=get_data.fn_source,
                       eager_source=get_data.eager_source, layout=None)
def test_power_spectrum():
    get_data = GetData(audio_data)
    check_single_input('power_spectrum', fn_source=get_data.fn_source,
                       eager_source=get_data.eager_source, layout=None)
def test_spectrogram():
    get_data = GetData(audio_data)
    check_single_input('spectrogram', fn_source=get_data.fn_source,
                       eager_source=get_data.eager_source, layout=None, nfft=60, window_length=50,
                       window_step=25)
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def mel_filter_pipeline(source):
    """Full fn reference: spectrogram followed by mel_filter_bank."""
    data = fn.external_source(source=source)
    spectrum = fn.spectrogram(data, nfft=60, window_length=50, window_step=25)
    processed = fn.mel_filter_bank(spectrum)
    return processed
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def mel_filter_input_pipeline(source):
    """Produces only the spectrogram — input for the eager mel_filter_bank."""
    data = fn.external_source(source=source)
    spectrum = fn.spectrogram(data, nfft=60, window_length=50, window_step=25)
    return spectrum
def test_mel_filter_bank():
    compare_eager_with_pipeline(mel_filter_pipeline(audio_data), eager.mel_filter_bank,
                                eager_source=PipelineInput(mel_filter_input_pipeline, audio_data))
def test_to_decibels():
    get_data = GetData(audio_data)
    check_single_input('to_decibels', fn_source=get_data.fn_source,
                       eager_source=get_data.eager_source, layout=None)
def test_audio_resample():
    get_data = GetData(audio_data)
    check_single_input('audio_resample', fn_source=get_data.fn_source,
                       eager_source=get_data.eager_source, layout=None, scale=1.25)
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def mfcc_pipeline(source):
data = fn.external_source(source=source)
spectrum = fn.spectrogram(data, nfft=60, window_length=50, window_step=25)
mel = fn.mel_filter_bank(spectrum)
dec = fn.to_decibels(mel)
processed = fn.mfcc(dec)
return processed
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def mfcc_input_pipeline(source):
data = fn.external_source(source=source)
spectrum = fn.spectrogram(data, nfft=60, window_length=50, window_step=25)
mel = fn.mel_filter_bank(spectrum)
dec = fn.to_decibels(mel)
return dec
def test_mfcc():
compare_eager_with_pipeline(mfcc_pipeline(audio_data), eager.mfcc,
eager_source=PipelineInput(mfcc_input_pipeline, audio_data))
def test_one_hot():
get_data = GetData(flat_data)
check_single_input('one_hot', fn_source=get_data.fn_source,
eager_source=get_data.eager_source, num_classes=256, layout=None)
def test_transpose():
    check_single_input('transpose', perm=[2, 0, 1])


@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def audio_decoder_pipeline():
    data, _ = fn.readers.file(files=audio_files)
    out = fn.decoders.audio(data)
    return tuple(out)


def test_audio_decoder():
    compare_eager_with_pipeline(audio_decoder_pipeline(), eager.decoders.audio,
                                eager_source=PipelineInput(file_reader_pipeline,
                                                           files=audio_files))


def test_coord_flip():
    # 2D coordinates normalized to [0, 1).
    get_data = GetData([[(rng.integers(0, 255, size=[200, 2], dtype=np.uint8) /
                          255).astype(dtype=np.float32) for _ in range(batch_size)]
                        for _ in range(data_size)])
    check_single_input('coord_flip', fn_source=get_data.fn_source,
                       eager_source=get_data.eager_source, layout=None)


def test_bb_flip():
    # Bounding boxes: 4 normalized floats per row.
    get_data = GetData([[(rng.integers(0, 255, size=[200, 4], dtype=np.uint8) /
                          255).astype(dtype=np.float32) for _ in range(batch_size)]
                        for _ in range(data_size)])
    check_single_input('bb_flip', fn_source=get_data.fn_source,
                       eager_source=get_data.eager_source, layout=None)


def test_warp_affine():
    check_single_input('warp_affine', matrix=(0.1, 0.9, 10, 0.8, -0.2, -20))


def test_normalize():
    check_single_input('normalize')


def test_lookup_table():
    get_data = GetData([[rng.integers(0, 5, size=[100], dtype=np.uint8)
                         for _ in range(batch_size)] for _ in range(data_size)])
    check_single_input('lookup_table', keys=[1, 3], values=[
        10, 50], fn_source=get_data.fn_source, eager_source=get_data.eager_source,
        layout=None)


# fn.slice takes data plus externally-provided anchors and shapes.

@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def slice_pipeline(get_anchor, get_shape):
    data = fn.external_source(source=get_data, layout='HWC')
    anchors = fn.external_source(source=get_anchor)
    shape = fn.external_source(source=get_shape)
    processed = fn.slice(data, anchors, shape, out_of_bounds_policy='pad')
    return processed


def test_slice():
    get_anchors = GetData([[(rng.integers(1, 256, size=[2], dtype=np.uint8) /
                             255).astype(dtype=np.float32) for _ in range(batch_size)]
                           for _ in range(data_size)])
    get_shapes = GetData([[(rng.integers(1, 256, size=[2], dtype=np.uint8) /
                            255).astype(dtype=np.float32) for _ in range(batch_size)]
                          for _ in range(data_size)])

    def eager_source(i, _):
        return get_data_eager(i), get_anchors.eager_source(i), get_shapes.eager_source(i)

    pipe = slice_pipeline(get_anchors.fn_source, get_shapes.fn_source)
    compare_eager_with_pipeline(pipe, eager.slice, eager_source=eager_source,
                                out_of_bounds_policy='pad')
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def image_decoder_slice_pipeline(get_anchors, get_shape):
    input, _ = fn.readers.file(file_root=images_dir)
    anchors = fn.external_source(source=get_anchors)
    shape = fn.external_source(source=get_shape)
    processed = fn.decoders.image_slice(input, anchors, shape)
    return processed


def test_image_decoder_slice():
    get_anchors = GetData([[(rng.integers(1, 128, size=[2], dtype=np.uint8) /
                             255).astype(dtype=np.float32) for _ in range(batch_size)]
                           for _ in range(data_size)])
    get_shapes = GetData([[(rng.integers(1, 128, size=[2], dtype=np.uint8) /
                            255).astype(dtype=np.float32) for _ in range(batch_size)]
                          for _ in range(data_size)])
    # Encoded images for the eager op come from a file-reader pipeline.
    eager_input = file_reader_pipeline({'file_root': images_dir})
    eager_input.build()

    def eager_source(i, _):
        return (eager_input.run()[0], get_anchors.eager_source(i, None),
                get_shapes.eager_source(i, None))

    pipe = image_decoder_slice_pipeline(get_anchors.fn_source, get_shapes.fn_source)
    compare_eager_with_pipeline(pipe, eager.decoders.image_slice, eager_source=eager_source)


def test_pad():
    get_data = GetData([[rng.integers(0, 255, size=[5, 4, 3], dtype=np.uint8)
                         for _ in range(batch_size)] for _ in range(data_size)])
    check_single_input('pad', fn_source=get_data.fn_source,
                       eager_source=get_data.eager_source, fill_value=-1, axes=(0,), shape=(10,))


# --- Reader coverage: every reader is compared against its eager counterpart. ---

def test_file_reader():
    check_reader('readers.file', file_root=images_dir)


def test_mxnet_reader():
    check_reader('readers.mxnet', path=os.path.join(recordio_dir, 'train.rec'),
                 index_path=os.path.join(recordio_dir, 'train.idx'), shard_id=0, num_shards=1)


def test_webdataset_reader():
    webdataset = os.path.join(webdataset_dir, 'MNIST', 'devel-0.tar')
    webdataset_idx = generate_temp_wds_index(webdataset)
    check_reader('readers.webdataset',
                 paths=webdataset,
                 index_paths=webdataset_idx.name,
                 ext=['jpg', 'cls'],
                 shard_id=0, num_shards=1)


def test_coco_reader():
    check_reader('readers.coco', file_root=coco_dir,
                 annotations_file=coco_annotation, shard_id=0, num_shards=1)


def test_caffe_reader():
    check_reader('readers.caffe', path=caffe_dir, shard_id=0, num_shards=1)


def test_caffe2_reader():
    check_reader('readers.caffe2', path=caffe2_dir, shard_id=0, num_shards=1)


def test_nemo_asr_reader():
    tmp_dir, nemo_asr_manifest = setup_test_nemo_asr_reader_cpu()
    with tmp_dir:
        check_reader('readers.nemo_asr', manifest_filepaths=[nemo_asr_manifest], dtype=types.INT16,
                     downmix=False, read_sample_rate=True, read_text=True, seed=1234)


def test_video_reader():
    check_reader('experimental.readers.video', filenames=video_files,
                 labels=[0, 1], sequence_length=10)


def test_copy():
    check_single_input('copy')


def test_element_extract():
    check_single_input('element_extract', element_map=[0, 3], layout=None)


def test_bbox_paste():
    get_data = GetData([[(rng.integers(0, 255, size=[200, 4], dtype=np.uint8) /
                          255).astype(dtype=np.float32) for _ in range(batch_size)]
                        for _ in range(data_size)])
    check_single_input('bbox_paste', fn_source=get_data.fn_source,
                       eager_source=get_data.eager_source, layout=None, paste_x=0.25,
                       paste_y=0.25, ratio=1.5)


def test_sequence_rearrange():
    get_data = GetData([[rng.integers(0, 255, size=[5, 10, 20, 3], dtype=np.uint8)
                         for _ in range(batch_size)] for _ in range(data_size)])
    check_single_input('sequence_rearrange', new_order=[0, 4, 1, 3, 2],
                       fn_source=get_data.fn_source, eager_source=get_data.eager_source,
                       layout='FHWC')
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def box_encoder_pipeline(get_boxes, get_labels):
    boxes = fn.external_source(source=get_boxes)
    labels = fn.external_source(source=get_labels)
    out = fn.box_encoder(boxes, labels, anchors=coco_anchors())
    return tuple(out)


def test_box_encoder():
    get_boxes = GetData([[(rng.integers(0, 255, size=[20, 4], dtype=np.uint8) / 255)
                          .astype(dtype=np.float32) for _ in range(batch_size)]
                         for _ in range(data_size)])
    get_labels = GetData([[rng.integers(0, 255, size=[20, 1], dtype=np.int32)
                           for _ in range(batch_size)] for _ in range(data_size)])

    def eager_source(i, _):
        return get_boxes.eager_source(i), get_labels.eager_source(i)

    pipe = box_encoder_pipeline(get_boxes.fn_source, get_labels.fn_source)
    compare_eager_with_pipeline(pipe, eager.box_encoder,
                                eager_source=eager_source, anchors=coco_anchors())


def test_numpy_reader():
    with setup_test_numpy_reader_cpu() as test_data_root:
        check_reader('readers.numpy', file_root=test_data_root)


def test_constant():
    check_no_input('constant', fdata=(1.25, 2.5, 3))


def test_dump_image():
    check_single_input('dump_image')


def test_sequence_reader():
    check_reader('readers.sequence', file_root=sequence_dir,
                 sequence_length=2, shard_id=0, num_shards=1)


# --- Affine transform generators (no-input operators). ---

def test_affine_translate():
    check_no_input('transforms.translation', offset=(2, 3))


def test_affine_scale():
    check_no_input('transforms.scale', scale=(2, 3))


def test_affine_rotate():
    check_no_input('transforms.rotation', angle=30.0)


def test_affine_shear():
    check_no_input('transforms.shear', shear=(2., 1.))


def test_affine_crop():
    check_no_input('transforms.crop', from_start=(0.1, 0.2), from_end=(1., 1.2),
                   to_start=(0.2, 0.3), to_end=(0.5, 0.6))


@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def combine_transforms_pipeline():
    t = fn.transforms.translation(offset=(1, 2))
    r = fn.transforms.rotation(angle=30.0)
    s = fn.transforms.scale(scale=(2, 3))
    out = fn.transforms.combine(t, r, s)
    return out


@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def combine_transforms_input_pipeline():
    t = fn.transforms.translation(offset=(1, 2))
    r = fn.transforms.rotation(angle=30.0)
    s = fn.transforms.scale(scale=(2, 3))
    return t, r, s


def test_combine_transforms():
    compare_eager_with_pipeline(combine_transforms_pipeline(), eager.transforms.combine,
                                eager_source=PipelineInput(combine_transforms_input_pipeline))


def test_reduce_min():
    check_single_input('reductions.min')


def test_reduce_max():
    check_single_input('reductions.max')


def test_reduce_sum():
    check_single_input('reductions.sum')
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def segmentation_select_masks_pipeline(source):
    device = 'cpu' if Pipeline.current().device_id is None else 'gpu'
    polygons, vertices, selected_masks = fn.external_source(
        source=source, num_outputs=3, device=device)
    out_polygons, out_vertices = fn.segmentation.select_masks(
        selected_masks, polygons, vertices, reindex_masks=False)
    return out_polygons, out_vertices


@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def segmentation_select_masks_input_pipeline(source):
    device = 'cpu' if Pipeline.current().device_id is None else 'gpu'
    polygons, vertices, selected_masks = fn.external_source(
        source=source, num_outputs=3, device=device)
    return selected_masks, polygons, vertices


def test_segmentation_select_masks():
    data = [make_batch_select_masks(batch_size, vertex_ndim=2, npolygons_range=(1, 5),
                                    nvertices_range=(3, 10)) for _ in range(data_size)]
    pipe = segmentation_select_masks_pipeline(data)
    compare_eager_with_pipeline(pipe, eager.segmentation.select_masks, eager_source=PipelineInput(
        segmentation_select_masks_input_pipeline, data))


def test_reduce_mean():
    check_single_input('reductions.mean')


def test_reduce_mean_square():
    check_single_input('reductions.mean_square')


def test_reduce_root_mean_square():
    check_single_input('reductions.rms')


# std_dev/variance take the data and its mean as two inputs, hence the
# dedicated pipelines below.

@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def reduce_pipeline(op):
    data = fn.external_source(source=get_data)
    mean = fn.reductions.mean(data)
    out = op(data, mean)
    return out


@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def reduce_input_pipeline():
    data = fn.external_source(source=get_data)
    mean = fn.reductions.mean(data)
    return data, mean


def test_reduce_std():
    pipe = reduce_pipeline(fn.reductions.std_dev)
    compare_eager_with_pipeline(pipe, eager_op=eager.reductions.std_dev,
                                eager_source=PipelineInput(reduce_input_pipeline))


def test_reduce_variance():
    pipe = reduce_pipeline(fn.reductions.variance)
    compare_eager_with_pipeline(pipe, eager_op=eager.reductions.variance,
                                eager_source=PipelineInput(reduce_input_pipeline))


@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def multi_input_pipeline(op, n):
    data = [fn.external_source(source=get_data, layout='HWC') for _ in range(n)]
    out = op(*data)
    return out


def test_cat():
    num_inputs = 3
    compare_eager_with_pipeline(multi_input_pipeline(fn.cat, num_inputs), eager_op=eager.cat,
                                eager_source=get_multi_data_eager(num_inputs))


def test_stack():
    num_inputs = 3
    compare_eager_with_pipeline(multi_input_pipeline(fn.stack, num_inputs), eager_op=eager.stack,
                                eager_source=get_multi_data_eager(num_inputs))


def test_batch_permute():
    check_single_input('permute_batch', indices=rng.permutation(batch_size).tolist())


def test_squeeze():
    get_data = GetData([[np.zeros(shape=[10, 20, 3, 1, 1], dtype=np.uint8)
                         for _ in range(batch_size)]] * data_size)
    check_single_input('squeeze', fn_source=get_data.fn_source, eager_source=get_data.eager_source,
                       axis_names='YZ', layout='HWCYZ')


def test_peek_image_shape():
    check_single_input('peek_image_shape', pipe_fun=reader_op_pipeline, fn_source=images_dir,
                       eager_source=PipelineInput(file_reader_pipeline, file_root=images_dir))


def test_subscript_dim_check():
    check_single_input('subscript_dim_check', num_subscripts=3)
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def get_property_pipeline(files):
    data, _ = fn.readers.file(files=files)
    out = fn.get_property(data, key='source_info')
    return out


def test_get_property():
    root_path = os.path.join(data_root, 'db', 'single', 'png', '0')
    files = [os.path.join(root_path, i) for i in os.listdir(root_path)]
    pipe = get_property_pipeline(files)
    compare_eager_with_pipeline(pipe, eager.get_property, eager_source=PipelineInput(
        file_reader_pipeline, files=files), key='source_info')


def test_tensor_subscript():
    check_single_input('tensor_subscript', lo_0=1, hi_1=-1, at_2=1)


def eager_arithm_ops(data):
    # Eager counterpart of pipeline_arithm_ops_cpu: every arithmetic,
    # comparison and bitwise operator plus the eager.math functions.
    # NOTE(review): eager.math.exp appears twice below -- presumably it
    # mirrors a duplicate in pipeline_arithm_ops_cpu; verify before editing.
    return (data * 2,
            data + 2,
            data - 2,
            data / 2,
            data // 2,
            data ** 2,
            data == 2,
            data != 2,
            data < 2,
            data <= 2,
            data > 2,
            data >= 2,
            data & 2,
            data | 2,
            data ^ 2,
            eager.math.abs(data),
            eager.math.fabs(data),
            eager.math.floor(data),
            eager.math.ceil(data),
            eager.math.pow(data, 2),
            eager.math.fpow(data, 1.5),
            eager.math.min(data, 2),
            eager.math.max(data, 50),
            eager.math.clamp(data, 10, 50),
            eager.math.sqrt(data),
            eager.math.rsqrt(data),
            eager.math.cbrt(data),
            eager.math.exp(data),
            eager.math.exp(data),
            eager.math.log(data),
            eager.math.log2(data),
            eager.math.log10(data),
            eager.math.sin(data),
            eager.math.cos(data),
            eager.math.tan(data),
            eager.math.asin(data),
            eager.math.acos(data),
            eager.math.atan(data),
            eager.math.atan2(data, 3),
            eager.math.sinh(data),
            eager.math.cosh(data),
            eager.math.tanh(data),
            eager.math.asinh(data),
            eager.math.acosh(data),
            eager.math.atanh(data))


def test_arithm_ops():
    # eager.arithmetic() enables operator overloading on eager tensors.
    with eager.arithmetic():
        pipe = pipeline_arithm_ops_cpu(get_data, batch_size=batch_size,
                                       num_threads=4, device_id=None)
        compare_eager_with_pipeline(pipe, eager_op=eager_arithm_ops)
# --- Stateful (randomized) operators: compared through matched seeds. ---

def test_image_decoder_random_crop():
    check_single_input_stateful('decoders.image_random_crop', pipe_fun=reader_op_pipeline,
                                fn_source=images_dir, eager_source=PipelineInput(
                                    file_reader_pipeline, file_root=images_dir),
                                output_type=types.RGB)


def test_noise_gaussian():
    check_single_input_stateful('noise.gaussian')


def test_noise_salt_and_pepper():
    check_single_input_stateful('noise.salt_and_pepper')


def test_noise_shot():
    check_single_input_stateful('noise.shot')


def test_random_mask_pixel():
    check_single_input_stateful('segmentation.random_mask_pixel')


def test_random_resized_crop():
    check_single_input_stateful('random_resized_crop', size=[5, 5])


def test_random_object_bbox():
    # Two fixed label maps wrapped as a TensorListCPU (same batch every call).
    data = tensors.TensorListCPU([tensors.TensorCPU(
        np.int32([[1, 0, 0, 0],
                  [1, 2, 2, 1],
                  [1, 1, 2, 0],
                  [2, 0, 0, 1]])), tensors.TensorCPU(
        np.int32([[0, 3, 3, 0],
                  [1, 0, 1, 2],
                  [0, 1, 1, 0],
                  [0, 2, 0, 1],
                  [0, 2, 2, 1]]))])

    def source(*_):
        return data

    check_single_input_stateful('segmentation.random_object_bbox',
                                fn_source=source, eager_source=source, layout="")


def test_fast_resize_crop_mirror():
    check_single_input_stateful('fast_resize_crop_mirror', crop=[5, 5], resize_shorter=10)


def test_roi_random_crop():
    shape = [10, 20, 3]
    check_single_input_stateful('roi_random_crop',
                                crop_shape=[x // 2 for x in shape],
                                roi_start=[x // 4 for x in shape],
                                roi_shape=[x // 2 for x in shape])


@pipeline_def(batch_size=batch_size, num_threads=4, device_id=None)
def random_bbox_crop_pipeline(get_boxes, get_labels, seed):
    boxes = fn.external_source(source=get_boxes)
    labels = fn.external_source(source=get_labels)
    out = fn.random_bbox_crop(boxes, labels, aspect_ratio=[0.5, 2.0], thresholds=[
        0.1, 0.3, 0.5], scaling=[0.8, 1.0], bbox_layout="xyXY", seed=seed)
    return tuple(out)


def test_random_bbox_crop():
    get_boxes = GetData([[(rng.integers(0, 255, size=[200, 4], dtype=np.uint8) / 255).astype(
        dtype=np.float32) for _ in range(batch_size)] for _ in range(data_size)])
    get_labels = GetData([[rng.integers(0, 255, size=[200, 1], dtype=np.int32) for _ in
                           range(batch_size)] for _ in range(data_size)])

    def eager_source(i, _):
        return get_boxes.eager_source(i), get_labels.eager_source(i)

    # prep_stateful_operators pairs the fn and eager seeds.
    _, eager_op, fn_seed = prep_stateful_operators('random_bbox_crop')
    pipe = random_bbox_crop_pipeline(get_boxes.fn_source, get_labels.fn_source, fn_seed)
    compare_eager_with_pipeline(pipe, eager_op, eager_source=eager_source, aspect_ratio=[0.5, 2.0],
                                thresholds=[0.1, 0.3, 0.5], scaling=[0.8, 1.0],
                                bbox_layout="xyXY")


def test_resize_crop_mirror():
    check_single_input_stateful('resize_crop_mirror', crop=[5, 5], resize_shorter=10)


def test_random_coin_flip():
    check_no_input_stateful('random.coin_flip')


def test_normal_distribution():
    check_no_input_stateful('random.normal', shape=[5, 5])


def test_random_uniform():
    check_no_input_stateful('random.uniform')


def test_batch_permutation():
    check_no_input_stateful('batch_permutation')
# Operator names covered by the tests in this file. test_coverage() below
# asserts that every eager operator (minus excluded_methods) appears here.
tested_methods = [
    'decoders.image',
    'rotate',
    'brightness_contrast',
    'hue',
    'brightness',
    'contrast',
    'hsv',
    'color_twist',
    'saturation',
    'shapes',
    'crop',
    'color_space_conversion',
    'cast',
    'resize',
    'per_frame',
    'gaussian_blur',
    'laplacian',
    'crop_mirror_normalize',
    'flip',
    'jpeg_compression_distortion',
    'decoders.image_crop',
    'reshape',
    'reinterpret',
    'water',
    'sphere',
    'erase',
    'expand_dims',
    'coord_transform',
    'grid_mask',
    'multi_paste',
    'nonsilent_region',
    'preemphasis_filter',
    'power_spectrum',
    'spectrogram',
    'mel_filter_bank',
    'to_decibels',
    'audio_resample',
    'mfcc',
    'one_hot',
    'transpose',
    'decoders.audio',
    'coord_flip',
    'bb_flip',
    'warp_affine',
    'normalize',
    'lookup_table',
    'slice',
    'decoders.image_slice',
    'pad',
    'readers.file',
    'readers.mxnet',
    'readers.webdataset',
    'readers.coco',
    'readers.caffe',
    'readers.caffe2',
    'readers.nemo_asr',
    'experimental.readers.video',
    'copy',
    'element_extract',
    'bbox_paste',
    'sequence_rearrange',
    'box_encoder',
    'readers.numpy',
    'constant',
    'dump_image',
    'readers.sequence',
    'transforms.translation',
    'transforms.scale',
    'transforms.rotation',
    'transforms.shear',
    'transforms.crop',
    'transforms.combine',
    'reductions.min',
    'reductions.max',
    'reductions.sum',
    'segmentation.select_masks',
    'reductions.mean',
    'reductions.mean_square',
    'reductions.rms',
    'reductions.std_dev',
    'reductions.variance',
    'cat',
    'stack',
    'permute_batch',
    'squeeze',
    'peek_image_shape',
    'subscript_dim_check',
    'get_property',
    'tensor_subscript',
    'arithmetic_generic_op',
    'decoders.image_random_crop',
    'noise.gaussian',
    'noise.salt_and_pepper',
    'noise.shot',
    'segmentation.random_mask_pixel',
    'segmentation.random_object_bbox',
    'fast_resize_crop_mirror',
    'roi_random_crop',
    'random_bbox_crop',
    'random_resized_crop',
    'resize_crop_mirror',
    'random.coin_flip',
    'random.normal',
    'random.uniform',
    'batch_permutation',
]

# Glob-like patterns for operators intentionally not tested here
# ('.' literal, '*' any run, '?' single character -- see test_coverage()).
excluded_methods = [
    'hidden.*',
    'jitter',  # not supported for CPU
    'video_reader',  # not supported for CPU
    'video_reader_resize',  # not supported for CPU
    'readers.video',  # not supported for CPU
    'readers.video_resize',  # not supported for CPU
    'optical_flow',  # not supported for CPU
    'paste',  # not supported for CPU
    'experimental.inflate',  # not supported for CPU
]
def test_coverage():
    """ Checks coverage of eager operators (almost every operator is also exposed in eager mode).

    If you added a new operator, you should also add a test for it here and add the operator name
    to the ``tested_methods`` list. You should also add eager classification for your operator in
    `dali/python/nvidia/dali/_utils/eager_utils.py`.
    """
    methods = module_functions(eager, remove_prefix="nvidia.dali.experimental.eager")
    methods += module_functions(
        eager.rng_state(), remove_prefix='rng_state', check_non_module=True)
    # TODO(ksztenderski): Add coverage for GPU operators.
    # Translate the glob-like patterns of excluded_methods into one regex:
    # '.' is literal, '*' matches any run, '?' matches a single character.
    exclude = "|".join(
        ["(^" + x.replace(".", "\.").replace("*", ".*").replace("?", ".") + "$)"  # noqa: W605
         for x in excluded_methods])
    exclude = re.compile(exclude)
    methods = [x for x in methods if not exclude.match(x)]
    assert set(methods).difference(set(tested_methods)) == set(
    ), "Test doesn't cover:\n {}".format(set(methods) - set(tested_methods))
|
DALI-main
|
dali/test/python/test_eager_coverage.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import pipeline_def
from nvidia.dali.fn import readers
from nose.tools import assert_equal
import tempfile
from subprocess import call
import os
import tarfile
# Batch size shared by the webdataset test pipelines below.
test_batch_size = 4

# Path to the index-generation tool, relative to this test file's directory.
wds2idx_script = "../../../tools/wds2idx.py"
@pipeline_def()
def webdataset_raw_pipeline(
    paths,
    index_paths,
    ext,
    case_sensitive_extensions=True,
    missing_component_behavior="empty",
    dtypes=None,
    dont_use_mmap=False,
    num_shards=1,
    shard_id=0,
    skip_cached_images=False,
    pad_last_batch=False,
    lazy_init=False,
    read_ahead=False,
    stick_to_shard=False,
):
    """Pipeline wrapping readers.webdataset, forwarding all reader knobs 1:1.

    Returns the reader's outputs as a tuple when it produces multiple
    components (one per extension), or the single output otherwise.
    """
    out = readers.webdataset(
        paths=paths,
        index_paths=index_paths,
        ext=ext,
        case_sensitive_extensions=case_sensitive_extensions,
        missing_component_behavior=missing_component_behavior,
        dtypes=dtypes,
        dont_use_mmap=dont_use_mmap,
        prefetch_queue_depth=1,
        num_shards=num_shards,
        shard_id=shard_id,
        stick_to_shard=stick_to_shard,
        skip_cached_images=skip_cached_images,
        pad_last_batch=pad_last_batch,
        lazy_init=lazy_init,
        read_ahead=read_ahead,
    )
    # Pipelines must return tuples, not lists.
    return out if not isinstance(out, list) else tuple(out)
def filter_ext(files, exts):
    """Return the files whose name ends with a dot followed by one of *exts*.

    *exts* may be a single extension string or any iterable of extensions
    (given without the leading dot). The order of *files* is preserved.
    """
    if isinstance(exts, str):
        exts = {exts}
    suffixes = tuple("." + ext for ext in exts)
    return [name for name in files if name.endswith(suffixes)]
@pipeline_def()
def file_reader_pipeline(
    files,
    exts=None,
    dont_use_mmap=False,
    num_shards=1,
    shard_id=0,
    skip_cached_images=False,
    pad_last_batch=False,
    lazy_init=False,
    read_ahead=False,
    stick_to_shard=False,
):
    """Reference pipeline: one readers.file output per entry in *exts*.

    Each str/set entry is used to filter *files* by extension and feed a
    file reader; any other entry type is passed through to the output
    unchanged (used to inject precomputed data nodes).
    """
    if not isinstance(exts, list):
        exts = [exts]
    return tuple(
        readers.file(
            files=filter_ext(files, ext),
            dont_use_mmap=dont_use_mmap,
            prefetch_queue_depth=1,
            num_shards=num_shards,
            shard_id=shard_id,
            stick_to_shard=stick_to_shard,
            skip_cached_images=skip_cached_images,
            pad_last_batch=pad_last_batch,
            lazy_init=lazy_init,
            read_ahead=read_ahead,
        )[0]
        if type(ext) in {str, set}
        else ext
        for ext in exts
    )
def generate_temp_index_file(tar_file_path):
    """Run the wds2idx tool on *tar_file_path* and return the index file.

    Returns an open ``tempfile.NamedTemporaryFile``; the index file is
    deleted automatically when the returned object is closed or collected.
    Raises AssertionError if the script exits with a non-zero status.
    """
    # No `global` needed -- wds2idx_script is only read, never rebound.
    temp_index_file = tempfile.NamedTemporaryFile()
    # Close the devnull handle deterministically (the previous version
    # leaked it on every call).
    with open(os.devnull, "wb") as devnull:
        assert_equal(
            call([wds2idx_script, tar_file_path, temp_index_file.name], stdout=devnull), 0)
    return temp_index_file
def generate_temp_extract(tar_file_path):
    """Extract the regular-file members of a tar archive into a temp directory.

    Non-regular members (directories, links, devices) are skipped.
    Returns a ``tempfile.TemporaryDirectory``; its contents are removed when
    the returned object is cleaned up or garbage collected.
    """
    temp_extract_dir = tempfile.TemporaryDirectory()
    # Close the archive deterministically (the previous version leaked the
    # file handle).
    with tarfile.open(tar_file_path) as archive:
        for member in archive:
            if member.type != tarfile.REGTYPE:
                continue
            # NOTE(review): archives here are trusted test data; if untrusted
            # tars are ever used, pass filter="data" to guard path escapes.
            archive.extract(member, temp_extract_dir.name)
    return temp_extract_dir
|
DALI-main
|
dali/test/python/webdataset_base.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nose_utils # noqa:F401 - for Python 3.10
import nvidia.dali.fn as fn
from nvidia.dali import pipeline_def
import nvidia.dali.types as types
from test_utils import get_files, to_array
import numpy as np
import librosa
import torch
import math
import random
import os
from nose.tools import nottest
# Filtering librispeech samples
# Keep only the '237-134500' utterances to bound the test runtime; matching
# reference features live next to the wavs as .npy files at 16 kHz.
audio_files = get_files('db/audio/wav', 'wav')
audio_files = [file for file in audio_files if '237-134500' in file]
npy_files = [os.path.splitext(fpath)[0] + '.npy' for fpath in audio_files]
npy_files_sr = 16000
# From DeepLearningExamples
def _convert_samples_to_float32(samples):
"""Convert sample type to float32.
Audio sample type is usually integer or float-point.
Integers will be scaled to [-1, 1] in float32.
"""
float32_samples = samples.astype('float32')
if samples.dtype in np.sctypes['int']:
bits = np.iinfo(samples.dtype).bits
float32_samples *= (1. / 2 ** (bits - 1))
elif samples.dtype in np.sctypes['float']:
pass
else:
raise TypeError("Unsupported sample type: %s." % samples.dtype)
return float32_samples
# Maps the window name used in test parameters to the corresponding torch
# window constructor; 'none' means a rectangular (no) window.
torch_windows = {
    'hann': torch.hann_window,
    'hamming': torch.hamming_window,
    'blackman': torch.blackman_window,
    'bartlett': torch.bartlett_window,
    'none': None,
}
def stack_subsample_frames(x, stacking=1, subsampling=1):
    """Stack frames across the feature dim, then subsample along time.

    Input is (batch_size, feature_dim, num_frames); output is
    (batch_size, feature_dim * stacking, num_frames / subsampling).
    Time-shifted copies are zero-padded at the end of the time axis.
    """
    shifted = [x]
    for offset in range(1, stacking):
        padded = torch.zeros_like(x)
        padded[:, :, :-offset] = x[:, :, offset:]
        shifted.append(padded)
    stacked = torch.cat(shifted, dim=1)
    return stacked[:, :, ::subsampling]
class FilterbankFeatures():
    """Reference mel-filterbank feature extractor (torch-based).

    Mirrors the DeepLearningExamples ASR preprocessing: optional reflect
    padding, pre-emphasis, STFT power spectrum, mel filterbank, optional
    log, frame splicing and per-feature/all-feature normalization.
    """

    def __init__(self,
                 sample_rate=16000,
                 window_size=0.02,
                 window_stride=0.01,
                 window="hann",
                 normalize="per_feature",
                 n_fft=None,
                 pad_amount=0,
                 preemph=0.97,
                 nfilt=64,
                 lowfreq=0,
                 highfreq=None,
                 log=True,
                 frame_splicing_stack=1,
                 frame_splicing_subsample=1):
        # Window size/stride are given in seconds; convert to samples.
        self.win_length = int(sample_rate * window_size)
        self.hop_length = int(sample_rate * window_stride)
        # Default FFT size: next power of two >= window length.
        self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))
        self.normalize = normalize
        self.log = log
        self.frame_splicing_stack = frame_splicing_stack
        self.frame_splicing_subsample = frame_splicing_subsample
        self.nfilt = nfilt
        self.pad_amount = pad_amount
        self.preemph = preemph
        window_fn = torch_windows.get(window, None)
        # None window (rectangular) is passed straight through to torch.stft.
        self.window = window_fn(self.win_length, periodic=False) if window_fn else None
        filters = librosa.filters.mel(sr=sample_rate,
                                      n_fft=self.n_fft,
                                      n_mels=nfilt,
                                      fmin=lowfreq,
                                      fmax=highfreq)
        # Shape (1, nfilt, n_fft // 2 + 1) so it broadcasts over the batch.
        self.fb = torch.tensor(filters, dtype=torch.float).unsqueeze(0)

    @staticmethod
    def normalize_batch(x, seq_len, normalize_type):
        """Normalize (batch, features, frames) input using only the first
        seq_len[i] frames of each sample.

        normalize_type is 'per_feature' (per-row mean/std), 'all_features'
        (scalar mean/std per sample) or anything else for a no-op.
        """
        # Added to the std to avoid division by zero.
        constant = 1e-5
        if normalize_type == "per_feature":
            x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype, device=x.device)
            x_std = torch.zeros_like(x_mean)
            for i in range(x.shape[0]):
                x_mean[i, :] = x[i, :, :seq_len[i]].mean(dim=1)
                x_std[i, :] = x[i, :, :seq_len[i]].std(dim=1)
            # make sure x_std is not zero
            x_std += constant
            return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2)
        elif normalize_type == "all_features":
            x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
            x_std = torch.zeros_like(x_mean)
            for i in range(x.shape[0]):
                x_mean[i] = x[i, :, :seq_len[i].item()].mean()
                x_std[i] = x[i, :, :seq_len[i].item()].std()
            # make sure x_std is not zero
            x_std += constant
            return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1)
        else:
            return x

    def get_seq_len(self, seq_len):
        # Number of STFT frames produced for a given sample length
        # (center=True adds one frame).
        return seq_len.to(dtype=torch.int) // self.hop_length + 1

    def forward(self, inp, seq_len):
        """Compute features for a batch of signals (batch, samples);
        returns (batch, features, frames) with frames beyond seq_len zeroed."""
        x = inp
        dtype = x.dtype

        if self.pad_amount > 0:
            x = torch.nn.functional.pad(
                x.unsqueeze(1), (self.pad_amount, self.pad_amount), "reflect"
            ).squeeze(1)
            seq_len = seq_len + 2 * self.pad_amount

        seq_len = self.get_seq_len(seq_len)

        # do preemphasis
        if self.preemph is not None:
            x = torch.cat((x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]),
                          dim=1)

        # do stft
        x = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length,
                       win_length=self.win_length, pad_mode='reflect',
                       center=True, window=self.window.to(dtype=torch.float).to(x.device))

        # get power spectrum
        x = x.pow(2).sum(-1)

        # dot with filterbank energies
        x = torch.matmul(self.fb.to(x.dtype), x)

        # log features if required
        if self.log:
            x = torch.log(x + 1e-20)

        # frame splicing if required
        # NOTE(review): `or self.frame_splicing_subsample` is truthy for the
        # default value 1, so this branch always runs -- presumably `> 1` was
        # intended; harmless, since splicing with 1/1 is an identity.
        if self.frame_splicing_stack > 1 or self.frame_splicing_subsample:
            x = stack_subsample_frames(
                x, stacking=self.frame_splicing_stack, subsampling=self.frame_splicing_subsample)

        # normalize if required
        if self.normalize:
            x = self.normalize_batch(x, seq_len, normalize_type=self.normalize)

        # mask to zero any values beyond seq_len in batch,
        # pad to multiple of `pad_to` (for efficiency)
        max_len = x.size(-1)
        seq = torch.arange(max_len).to(seq_len.dtype).to(x.device)
        mask = seq.expand(x.size(0), max_len) >= seq_len.unsqueeze(1)
        x = x.masked_fill(mask.unsqueeze(1).to(device=x.device), 0)
        return x.to(dtype)
def dali_run(pipe, device):
    """Build *pipe*, run one iteration and return the first sample of the
    first output as a host array."""
    pipe.build()
    first_output = pipe.run()[0]
    return to_array(first_output)[0]
def win_args(sample_rate, window_size_sec, window_stride_sec):
    """Convert window size/stride in seconds to STFT (win_length, hop_length)
    in samples."""
    frame_size = int(sample_rate * window_size_sec)
    frame_step = int(sample_rate * window_stride_sec)
    return frame_size, frame_step
def torch_spectrogram(audio, sample_rate, device='cpu',
                      window_size=0.02, window_stride=0.01,
                      center=True, pad_mode='reflect',
                      window="hann", n_fft=None):
    """Reference power spectrogram via torch.stft, returned as numpy.

    ``window`` selects an entry of torch_windows; ``device`` is 'cpu' or
    'gpu'. NOTE(review): window='none' yields window_tensor=None and the
    .to() call below raises -- same as the previous version; confirm 'none'
    is never used with this helper.
    """
    audio = torch.tensor(audio, dtype=torch.float32)
    if device == 'gpu':
        audio = audio.cuda()
    win_length, hop_length = win_args(sample_rate, window_size, window_stride)
    n_fft = n_fft or 2 ** math.ceil(math.log2(win_length))
    window_fn = torch_windows.get(window, None)
    window_tensor = window_fn(win_length, periodic=False) if window_fn else None
    # return_complex=True is required by modern torch (the implicit real-view
    # output was deprecated and then removed); |stft|^2 equals the former
    # stft_out.pow(2).sum(-1) on the real view.
    stft_out = torch.stft(audio, n_fft=n_fft, hop_length=hop_length,
                          win_length=win_length, pad_mode=pad_mode,
                          center=center, return_complex=True,
                          window=window_tensor.to(dtype=torch.float))
    # get power spectrum
    spectrogram = stft_out.abs().pow(2)
    spectrogram = spectrogram.cpu().numpy()
    return spectrogram
def torch_mel_fbank(spectrogram, sample_rate, device='cpu',
                    nfilt=64, lowfreq=0, highfreq=None):
    """Reference mel-filterbank application on a power spectrogram.

    ``spectrogram`` is (freq_bins, frames); the FFT size is recovered from
    the number of frequency bins. Returns a numpy array.
    """
    spectrogram = torch.tensor(spectrogram, dtype=torch.float32)
    if device == 'gpu':
        spectrogram = spectrogram.cuda()
    n_fft = 2 * (spectrogram.shape[0] - 1)
    # librosa >= 0.10 requires keyword arguments here (positional sr/n_fft
    # were removed); this also matches the call in FilterbankFeatures.
    filterbanks = torch.tensor(
        librosa.filters.mel(sr=sample_rate, n_fft=n_fft, n_mels=nfilt,
                            fmin=lowfreq, fmax=highfreq), dtype=torch.float)
    if device == 'gpu':
        filterbanks = filterbanks.cuda()
    mel_spectrogram = torch.matmul(filterbanks.to(spectrogram.dtype), spectrogram)
    mel_spectrogram = mel_spectrogram.cpu().numpy()
    return mel_spectrogram
def torch_log(x, device='cpu'):
    """Reference log transform: log(x + 1e-20), returned as numpy."""
    tensor = torch.tensor(x, dtype=torch.float32)
    if device == 'gpu':
        tensor = tensor.cuda()
    result = torch.log(tensor + 1e-20)
    return result.cpu().numpy()
def torch_preemphasis(x, preemph, device='cpu'):
    """Reference pre-emphasis: y[0] = x[0], y[t] = x[t] - preemph * x[t-1].
    Returns a numpy array."""
    signal = torch.tensor(x, dtype=torch.float32)
    if device == 'gpu':
        signal = signal.cuda()
    first = signal[0].unsqueeze(0)
    rest = signal[1:] - preemph * signal[:-1]
    emphasized = torch.cat((first, rest), dim=0)
    return emphasized.cpu().numpy()
def torch_normalize(mel_spec, normalize_type, seq_len=None, device='cpu'):
    """Reference normalization via FilterbankFeatures.normalize_batch.

    ``normalize_type`` selects 'per_feature' or 'all_features' statistics;
    when ``seq_len`` is None the whole time axis is used.
    """
    # Add a batch dimension expected by normalize_batch
    mel_spec = torch.tensor(mel_spec, dtype=torch.float32).unsqueeze(0)
    if seq_len is None:
        seq_len = torch.tensor(mel_spec.shape[2]).unsqueeze(0)
    if device == 'gpu':
        mel_spec = mel_spec.cuda()
    out = FilterbankFeatures().normalize_batch(
        mel_spec, seq_len, normalize_type=normalize_type)
    # Drop the batch dimension again before returning numpy
    out = out.cpu().numpy().squeeze(0)
    return out
def torch_frame_splicing(mel_spec, stacking=1, subsampling=1, device='cpu'):
    """Reference frame splicing via stack_subsample_frames (batched internally)."""
    # Add a batch dimension expected by stack_subsample_frames
    mel_spec = torch.tensor(mel_spec, dtype=torch.float32).unsqueeze(0)
    if device == 'gpu':
        mel_spec = mel_spec.cuda()
    out = stack_subsample_frames(mel_spec, stacking=stacking, subsampling=subsampling)
    # Drop the batch dimension again before returning numpy
    out = out.cpu().numpy().squeeze(0)
    return out
def dali_frame_splicing_graph(x, nfeatures, x_len, stacking=1, subsampling=1):
    """DALI graph for frame splicing: stack ``stacking`` consecutive frames along
    the feature axis, then subsample the time axis by ``subsampling``.
    """
    if stacking > 1:
        seq = [x]
        for n in range(1, stacking):
            # Version of x shifted by n frames; out-of-range frames are zero-filled
            f = fn.slice(x, n, x_len, axes=(1,), out_of_bounds_policy='pad', fill_values=0)
            seq.append(f)
        x = fn.cat(*seq, axis=0)
        nfeatures = nfeatures * stacking
    if subsampling > 1:
        out_len = (x_len + subsampling - 1) // subsampling
        # Nearest-neighbor affine warp along time implements the subsampling
        m = fn.transforms.scale(scale=[subsampling, 1], center=[0.5, 0])
        x = fn.reshape(x, rel_shape=[1, 1, -1], layout="HWC")  # Layout required by WarpAffine
        size = fn.cat(nfeatures, out_len)
        x = fn.warp_affine(x, matrix=m, size=size, interp_type=types.INTERP_NN)
        x = fn.reshape(x, rel_shape=[1, 1], layout="ft")
    return x
def torch_reflect_pad(x, pad_amount, device='cpu'):
    """Reference reflect padding: mirror ``pad_amount`` samples on each side of a 1D signal."""
    signal = torch.tensor(x, dtype=torch.float32).unsqueeze(0)
    if device == 'gpu':
        signal = signal.cuda()
    # F.pad with mode "reflect" needs a (N, C, W) tensor, hence the extra dim
    padded = torch.nn.functional.pad(
        signal.unsqueeze(1), (pad_amount, pad_amount), "reflect").squeeze(1)
    return padded.cpu().numpy().squeeze(0)
def dali_reflect_pad_graph(x, x_len, pad_amount):
    """DALI graph that reflect-pads a 1D signal with ``pad_amount`` samples per side."""
    def flip_1d(x):
        # TODO(janton): remove the layout trick when Flip supports arbitrary data layouts
        x = fn.reshape(x, shape=(-1, 1, 1), layout="HWC")
        x = fn.flip(x, vertical=1)
        x = fn.reshape(x, shape=(-1,), layout="t")
        return x
    # Mirror of samples 1..pad_amount (edge sample excluded, like 'reflect' mode)
    pad_start = fn.slice(x, 1, pad_amount, axes=(0,))
    pad_start = flip_1d(pad_start)
    # Mirror of the pad_amount samples preceding the last one
    pad_end = fn.slice(x, x_len - pad_amount - 1, pad_amount, axes=(0,))
    pad_end = flip_1d(pad_end)
    x = fn.cat(pad_start, x, pad_end, axis=0)
    return x
@pipeline_def(batch_size=1, device_id=0, num_threads=3)
def rnnt_train_pipe(files,
                    sample_rate,
                    pad_amount=0,
                    preemph_coeff=.97,
                    window_size=.02,
                    window_stride=.01,
                    window="hann",
                    nfeatures=64,
                    nfft=512,
                    frame_splicing_stack=1,
                    frame_splicing_subsample=1,
                    lowfreq=0.0,
                    highfreq=None,
                    normalize_type="per_feature",
                    speed_perturb=False,
                    silence_trim=False,
                    device="cpu"):
    """DALI pipeline mirroring the RNN-T audio feature-extraction chain.

    Stages: file read + audio decode, optional speed perturbation and silence
    trimming, optional reflect padding, pre-emphasis, power spectrogram,
    mel filterbank, log, optional frame splicing, optional normalization.
    Returns every intermediate result so each stage can be validated separately.
    """
    assert normalize_type == "per_feature" or normalize_type == "all_features"
    norm_axes = [1] if normalize_type == 'per_feature' else [0, 1]
    win_len, win_hop = win_args(sample_rate, window_size, window_stride)
    window_fn = torch_windows.get(window, None)
    # Precompute the window coefficients on the host; DALI takes a plain list
    window_fn_arg = window_fn(win_len, periodic=False).numpy().tolist() if window_fn else None
    data, _ = fn.readers.file(files=files, device="cpu", random_shuffle=False)
    audio, _ = fn.decoders.audio(data, dtype=types.FLOAT, downmix=True)
    # splicing with subsampling doesn't work if audio_len is a GPU data node
    if device == 'gpu' and frame_splicing_subsample == 1:
        audio = audio.gpu()
    # Speed perturbation 0.85x - 1.15x
    if speed_perturb:
        target_sr_factor = fn.random.uniform(device="cpu", range=(1 / 1.15, 1 / 0.85))
        audio = fn.audio_resample(audio, scale=target_sr_factor)
    # Silence trimming
    if silence_trim:
        begin, length = fn.nonsilent_region(audio, cutoff_db=-80)
        audio = fn.slice(audio, begin, length, axes=[0])
    audio_shape = fn.shapes(audio, dtype=types.INT32)
    orig_audio_len = fn.slice(audio_shape, 0, 1, axes=(0,))
    # If we couldn't move to GPU earlier, do it now
    if device == 'gpu' and frame_splicing_subsample > 1:
        audio = audio.gpu()
    if pad_amount > 0:
        audio_len = orig_audio_len + 2 * pad_amount
        padded_audio = dali_reflect_pad_graph(audio, orig_audio_len, pad_amount)
    else:
        audio_len = orig_audio_len
        padded_audio = audio
    # Preemphasis filter
    preemph_audio = fn.preemphasis_filter(padded_audio, preemph_coeff=preemph_coeff, border='zero')
    # Spectrogram
    spec_len = audio_len // win_hop + 1
    spec = fn.spectrogram(preemph_audio,
                          nfft=nfft,
                          window_fn=window_fn_arg,
                          window_length=win_len,
                          window_step=win_hop,
                          center_windows=True,
                          reflect_padding=True)
    # Mel spectrogram
    mel_spec = fn.mel_filter_bank(
        spec, sample_rate=sample_rate, nfilter=nfeatures, freq_low=lowfreq, freq_high=highfreq)
    # Log (to_decibels with multiplier ln(10) and reference 1.0 computes a natural log)
    log_features = fn.to_decibels(
        mel_spec + 1e-20, multiplier=np.log(10), reference=1.0, cutoff_db=-80)
    # Frame splicing
    if frame_splicing_stack > 1 or frame_splicing_subsample > 1:
        log_features_spliced = dali_frame_splicing_graph(log_features, nfeatures, spec_len,
                                                         stacking=frame_splicing_stack,
                                                         subsampling=frame_splicing_subsample)
    else:
        log_features_spliced = log_features
    # Normalization
    if normalize_type:
        norm_log_features = fn.normalize(
            log_features_spliced, axes=norm_axes, device=device, epsilon=4e-5, ddof=1)
    else:
        norm_log_features = log_features_spliced
    return (norm_log_features,
            log_features_spliced,
            log_features,
            mel_spec,
            spec,
            preemph_audio,
            padded_audio,
            audio)
# Decode the reference .npy recordings once at import time so every test case
# can reuse them without re-reading the files.
recordings = []
for fpath in npy_files:
    arr = np.load(fpath)
    arr = _convert_samples_to_float32(arr)
    recordings.append(arr)
nrecordings = len(recordings)
# Test compares pre-calculated output of native data pipeline with an output
# from DALI data pipeline. There are few modification of native data pipeline
# comparing to the reference: random operations (i.e. dither and presampling
# aka "speed perturbation") are turned off
def _testimpl_rnnt_data_pipeline(device,
                                 pad_amount=0,
                                 preemph_coeff=.97,
                                 window_size=.02,
                                 window_stride=.01,
                                 window="hann",
                                 nfeatures=64,
                                 n_fft=512,
                                 frame_splicing_stack=1,
                                 frame_splicing_subsample=1,
                                 lowfreq=0.0,
                                 highfreq=None,
                                 normalize_type='per_feature',
                                 batch_size=32):
    """Compare the DALI RNN-T pipeline against the torch/librosa reference, stage by stage.

    Every intermediate output of rnnt_train_pipe is checked against the equivalent
    torch reference computed both from the reference inputs (looser tolerance) and
    from DALI's own previous-stage output (tighter tolerance), which separates
    per-stage error from accumulated error.
    """
    sample_rate = npy_files_sr
    speed_perturb = False
    silence_trim = False
    ref_pipeline = FilterbankFeatures(sample_rate=sample_rate,
                                      window_size=window_size,
                                      window_stride=window_stride,
                                      window=window,
                                      normalize=normalize_type,
                                      n_fft=n_fft,
                                      pad_amount=pad_amount,
                                      preemph=preemph_coeff,
                                      nfilt=nfeatures,
                                      lowfreq=lowfreq,
                                      highfreq=highfreq,
                                      log=True,
                                      frame_splicing_stack=frame_splicing_stack,
                                      frame_splicing_subsample=frame_splicing_subsample)
    reference_data = []
    for i in range(nrecordings):
        reference_data.append(
            ref_pipeline.forward(
                torch.tensor([recordings[i]]),
                torch.tensor([recordings[i].shape[0]])
            )
        )
    pipe = rnnt_train_pipe(audio_files,
                           sample_rate,
                           pad_amount,
                           preemph_coeff,
                           window_size, window_stride, window,
                           nfeatures, n_fft,
                           frame_splicing_stack, frame_splicing_subsample,
                           lowfreq, highfreq,
                           normalize_type,
                           speed_perturb,
                           silence_trim,
                           device,
                           seed=42,
                           batch_size=batch_size)
    pipe.build()
    nbatches = (nrecordings + batch_size - 1) // batch_size
    i = 0
    for b in range(nbatches):
        dali_out = list(pipe.run())
        for s in range(batch_size):
            if i >= nrecordings:
                break
            (norm_log_features,
             log_features_spliced,
             log_features,
             mel_spec,
             spec,
             preemph_audio,
             padded_audio,
             audio) = [to_array(out[s]) for out in dali_out]
            ref = np.array(reference_data[i].squeeze(0))
            assert ref.shape == norm_log_features.shape, f"{ref.shape}, {norm_log_features.shape}"
            # NOTE: shadows the nfeatures parameter with the reference's actual value
            nfeatures, seq_len = ref.shape
            audio_ref = recordings[i]
            np.testing.assert_allclose(audio, audio_ref, atol=1e-4)
            padded_audio_ref = torch_reflect_pad(audio, pad_amount)
            np.testing.assert_equal(padded_audio, padded_audio_ref)
            preemph_audio_ref = torch_preemphasis(padded_audio_ref, preemph=preemph_coeff)
            np.testing.assert_allclose(preemph_audio, preemph_audio_ref, atol=1e-4)
            spec_ref = torch_spectrogram(preemph_audio_ref, npy_files_sr,
                                         window_size=window_size, window_stride=window_stride,
                                         center=True, pad_mode='reflect',
                                         window=window, n_fft=n_fft)
            np.testing.assert_allclose(spec, spec_ref, atol=1e-4)
            mel_spec_ref = torch_mel_fbank(spec_ref, npy_files_sr)
            np.testing.assert_allclose(mel_spec, mel_spec_ref, atol=1e-4)
            log_features_ref = torch_log(mel_spec_ref)
            np.testing.assert_allclose(log_features, log_features_ref, atol=1e-3)
            # Same stage computed from DALI's own previous output: tighter tolerance
            log_features_ref2 = torch_log(mel_spec)
            np.testing.assert_allclose(log_features, log_features_ref2, atol=1e-4)
            log_features_spliced_ref = torch_frame_splicing(log_features_ref,
                                                            stacking=frame_splicing_stack,
                                                            subsampling=frame_splicing_subsample)
            np.testing.assert_allclose(log_features_spliced, log_features_spliced_ref, atol=1e-3)
            log_features_spliced_ref2 = torch_frame_splicing(log_features,
                                                             stacking=frame_splicing_stack,
                                                             subsampling=frame_splicing_subsample)
            np.testing.assert_allclose(log_features_spliced, log_features_spliced_ref2, atol=1e-4)
            norm_log_features_ref = torch_normalize(log_features_spliced_ref, normalize_type)
            np.testing.assert_allclose(norm_log_features, norm_log_features_ref, atol=1e-3)
            norm_log_features_ref2 = torch_normalize(log_features_spliced, normalize_type)
            np.testing.assert_allclose(norm_log_features, norm_log_features_ref2, atol=1e-4)
            # Full pipeline
            np.testing.assert_allclose(norm_log_features, ref, atol=1e-3)
            i += 1
def test_rnnt_data_pipeline():
    """Yield one _testimpl_rnnt_data_pipeline case per (device, splicing, normalization) combo."""
    preemph_coeff = .97
    window_size = .02
    window_stride = .01
    window = "hann"
    nfeatures = 64
    n_fft = 512
    lowfreq = 0.0
    highfreq = None
    devices = ['cpu', 'gpu']
    splicing_configs = [(1, 1), (3, 2)]
    normalizations = ['per_feature', 'all_features']
    for device in devices:
        for splicing_stack, splicing_subsample in splicing_configs:
            for normalize_type in normalizations:
                # Randomize padding so both the padded and unpadded paths get coverage
                pad_amount = random.choice([0, 16])
                yield (_testimpl_rnnt_data_pipeline,
                       device,
                       pad_amount,
                       preemph_coeff,
                       window_size, window_stride, window,
                       nfeatures, n_fft,
                       splicing_stack, splicing_subsample,
                       lowfreq, highfreq,
                       normalize_type)
@nottest  # To be run manually to check perf
def test_rnnt_data_pipeline_throughput(pad_amount=0,
                                       preemph_coeff=.97,
                                       window_size=.02,
                                       window_stride=.01,
                                       window="hann",
                                       nfeatures=64,
                                       n_fft=512,
                                       frame_splicing_stack=1,
                                       frame_splicing_subsample=1,
                                       speed_perturb=True,
                                       silence_trim=True,
                                       lowfreq=0.0,
                                       highfreq=None,
                                       normalize_type='per_feature',
                                       batch_size=32):
    """Manual benchmark: run the GPU RNN-T pipeline for 1000 iterations and print throughput."""
    sample_rate = npy_files_sr
    device = 'gpu'
    pipe = rnnt_train_pipe(audio_files,
                           sample_rate,
                           pad_amount,
                           preemph_coeff,
                           window_size, window_stride, window,
                           nfeatures, n_fft,
                           frame_splicing_stack, frame_splicing_subsample,
                           lowfreq, highfreq,
                           normalize_type,
                           speed_perturb,
                           silence_trim,
                           device,
                           seed=42,
                           batch_size=batch_size)
    pipe.build()
    import time
    from test_utils import AverageMeter
    end = time.time()
    data_time = AverageMeter()
    iters = 1000
    for j in range(iters):
        pipe.run()
        data_time.update(time.time() - end)
        if j % 100 == 0:
            print(f"run {j+1}/ {iters}, avg time: {data_time.avg} [s], "
                  f"worst time: {data_time.max_val} [s], "
                  f"speed: {batch_size / data_time.avg} [recordings/s]")
        end = time.time()
|
DALI-main
|
dali/test/python/test_torch_pipeline_rnnt.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.plugin.tf as dali_tf
import tensorflow as tf
from nose.tools import nottest
from nvidia.dali import pipeline_def
from test_utils import RandomlyShapedDataIterator
def get_min_shape_helper(batch, max_shape):
    """For batch=None or batch=True, we use batch mode which requires fixed shape.
    In that case min and max shape for RandomSampleIterator need to be equal.
    `batch` can also be a string "dataset" that indicates we passed a Dataset object as input
    without specifying the batch mode through: Input(dataset, batch=...)
    """
    batch_mode = batch is None or batch is True
    return max_shape if batch_mode else None
class RandomSampleIterator:
    """Iterator yielding randomly shaped samples, used as a TF generator source.

    Yields one sample per step while the counter ``n`` is <= ``stop``;
    shapes vary between ``min_shape`` and ``max_shape``.
    """
    def __init__(self, max_shape=(10, 600, 800, 3), dtype_sample=np.uint8(0), start=0, stop=1e100,
                 min_shape=None, seed=42):
        self.start = start
        self.stop = stop
        self.min_shape = min_shape
        self.max_shape = max_shape
        # As tf passes only tensors to the iterator, we pass a dummy value of which we take the type
        self.dtype = dtype_sample.dtype
        self.seed = seed
    def __iter__(self):
        # Reset the counter and the underlying random data source on each new iteration
        self.n = self.start
        self.random_iter = iter(RandomlyShapedDataIterator(batch_size=1, min_shape=self.min_shape,
                                                           max_shape=self.max_shape, seed=self.seed,
                                                           dtype=self.dtype))
        return self
    def __next__(self):
        if self.n <= self.stop:
            self.n += 1
            # NOTE(review): relies on RandomlyShapedDataIterator exposing a `.next()` method;
            # batch size is 1, so [0] unwraps the single sample -- confirm against test_utils
            ret = self.random_iter.next()[0]
            return ret
        else:
            raise StopIteration
class FixedSampleIterator:
    """Infinite iterator that yields the same ``value`` on every step."""
    def __init__(self, value):
        # The constant sample returned by every __next__ call
        self.value = value
    def __iter__(self):
        return self
    def __next__(self):
        return self.value
class InfiniteSampleIterator:
    """Infinite iterator yielding start_value, start_value + 1, ... with the dtype preserved."""
    def __init__(self, start_value):
        self.value = start_value
    def __iter__(self):
        return self
    def __next__(self):
        current = self.value
        # Increment with a same-dtype scalar so the sample dtype never drifts
        self.value = current + np.array(1, dtype=current.dtype)
        return current
@pipeline_def
def one_input_pipeline(def_for_dataset, device, source, external_source_device, no_copy, batch):
    """Pipeline accepting single input via external source
    Parameters
    ----------
    def_for_dataset : bool
        True if this pipeline will be converted to TF Dataset
    device : str
        device that the Dataset will be placed ("cpu" or "gpu")
    source : callable
        callback for the external source in baseline pipeline otherwise None
    external_source_device : str
        Device that we want the external source in TF dataset to be placed
    no_copy : bool or None
        Forwarded to external_source; None means infer from device placement
    batch : bool, None or str
        external_source batch mode; the special string "dataset" maps to the default (None)
    """
    if def_for_dataset:
        if no_copy is None:
            # If no_copy is None, we infer it automatically and we use no_copy=True when
            # the input memory is matching the external source placement,
            # so the Dataset's placement is the same as external source's device,
            # otherwise for cross-backend we use False.
            no_copy = (device == external_source_device)
        if batch == "dataset":
            # Special value used in tests, reroute it to the default
            batch = None
        input = fn.external_source(name="input_placeholder",
                                   no_copy=no_copy,
                                   device=external_source_device,
                                   batch=batch)
    else:
        input = fn.external_source(name="actual_input",
                                   source=source,
                                   batch=False,
                                   device=external_source_device)
    input = input if device == 'cpu' else input.gpu()
    processed = fn.cast(input + 10, dtype=dali.types.INT32)
    # Pad both outputs so TF gets uniformly shaped batches
    input_padded, processed_padded = fn.pad([input, processed])
    return input_padded, processed_padded
# Test that uses Tensor and Repeat (infinite) datasets as inputs to DALI pipeline
def external_source_converter_with_fixed_value(shape, dtype, tensor, batch="dataset"):
    """Return a converter that feeds DALI from an infinitely repeated constant tensor.

    The returned callable maps (pipeline description, device string) to a
    DALIDatasetWithInputs whose "input_placeholder" is a repeated `tensor`.
    """
    def to_dataset(pipeline_desc, device_str):
        dataset_pipeline, shapes, dtypes = pipeline_desc
        with tf.device('/cpu:0'):
            input_dataset = tf.data.Dataset.from_tensors(tensor).repeat()
            if batch is None or batch is True:
                # Batch mode: group samples to the pipeline's max batch size
                input_dataset = input_dataset.batch(dataset_pipeline.max_batch_size)
        # If we place DALIDataset on GPU we need the remote call + manual data transfer
        if "gpu" in device_str:
            input_dataset = input_dataset.apply(tf.data.experimental.copy_to_device('/gpu:0'))
        if batch == "dataset":
            input_datasets = {"input_placeholder": input_dataset}
        else:
            input_datasets = {
                "input_placeholder": dali_tf.experimental.Input(input_dataset, batch=batch)
            }
        with tf.device(device_str):
            dali_dataset = dali_tf.experimental.DALIDatasetWithInputs(
                input_datasets=input_datasets,
                pipeline=dataset_pipeline,
                batch_size=dataset_pipeline.max_batch_size,
                output_shapes=shapes,
                output_dtypes=dtypes,
                num_threads=dataset_pipeline.num_threads,
                device_id=dataset_pipeline.device_id)
        return dali_dataset
    return to_dataset
def external_source_converter_with_callback(
        input_iterator, shape, dtype, start_samples=0, stop_samples=1e10, min_shape=None,
        batch="dataset"):
    """ Test that uses Generator dataset as inputs to DALI pipeline """
    def to_dataset(pipeline_desc, device_str):
        dataset_pipeline, shapes, dtypes = pipeline_desc
        with tf.device('/cpu:0'):
            # Arguments forwarded to the iterator class by tf.data; dtype is passed
            # as a dummy scalar because TF only forwards tensors
            _args = (shape, dtype(0), start_samples, stop_samples)
            # Add min_shape if it's not None
            _args = _args + ((min_shape,) if min_shape is not None else ())
            out_shape = tuple(None for _ in shape)
            tf_type = tf.dtypes.as_dtype(dtype)
            input_dataset = tf.data.Dataset.from_generator(
                input_iterator, output_types=tf_type, output_shapes=out_shape, args=_args)
            if batch is None or batch is True:
                input_dataset = input_dataset.batch(dataset_pipeline.max_batch_size)
        # If we place DALIDataset on GPU we need the remote call + manual data transfer
        if "gpu" in device_str:
            input_dataset = input_dataset.apply(tf.data.experimental.copy_to_device('/gpu:0'))
        if batch == "dataset":
            input_datasets = {"input_placeholder": input_dataset}
        else:
            input_datasets = {
                "input_placeholder": dali_tf.experimental.Input(input_dataset, batch=batch)
            }
        with tf.device(device_str):
            dali_dataset = dali_tf.experimental.DALIDatasetWithInputs(
                input_datasets=input_datasets,
                pipeline=dataset_pipeline,
                batch_size=dataset_pipeline.max_batch_size,
                output_shapes=shapes,
                output_dtypes=dtypes,
                num_threads=dataset_pipeline.num_threads,
                device_id=dataset_pipeline.device_id)
        return dali_dataset
    return to_dataset
@nottest
def external_source_tester(shape, dtype, source=None, external_source_device="cpu", no_copy=None,
                           batch=False):
    """Return a factory producing (pipeline, output_shapes, output_dtypes) for one-input tests."""
    def get_external_source_pipeline_getter(batch_size, num_threads, device, device_id=0,
                                            shard_id=0, num_shards=1, def_for_dataset=False):
        pipe = one_input_pipeline(def_for_dataset, device, source, external_source_device,
                                  batch_size=batch_size, num_threads=num_threads,
                                  device_id=device_id, no_copy=no_copy, batch=batch)
        # Shapes are reported per-batch with unknown (None) sample dimensions
        batch_shape = (batch_size,) + tuple(None for _ in shape)
        return pipe, (batch_shape, batch_shape), (tf.dtypes.as_dtype(dtype), tf.int32)
    return get_external_source_pipeline_getter
@pipeline_def
def many_input_pipeline(def_for_dataset, device, sources, input_names, batches):
    """ Pipeline accepting multiple inputs via external source
    Parameters
    ----------
    def_for_dataset : bool
        True if this pipeline will be converted to TF Dataset
    device : str
        device that the Dataset will be placed ("cpu" or "gpu")
    sources : list of callables
        callbacks for the external sources in baseline pipeline otherwise None
    input_names : list of str
        Names of inputs placeholder for TF
    batches : list
        per-input batch modes; the special string "dataset" maps to the default (None)
    """
    inputs = []
    if def_for_dataset:
        for input_name, batch in zip(input_names, batches):
            if batch == "dataset":
                # Special value used in tests, reroute it to the default
                batch = None
            input = fn.external_source(name=input_name, batch=batch)
            input = input if device == 'cpu' else input.gpu()
            inputs.append(input)
    else:
        for source in sources:
            input = fn.external_source(source=source, batch=False)
            input = input if device == 'cpu' else input.gpu()
            inputs.append(input)
    processed = []
    for input in inputs:
        processed.append(fn.cast(input + 10, dtype=dali.types.INT32))
    # Outputs are all raw inputs followed by all processed inputs, padded uniformly
    results = fn.pad(inputs + processed)
    return tuple(results)
# Test that uses multiple Generator dataset as inputs to DALI pipeline
def external_source_converter_multiple(start_values, input_names, batches):
    """Return a converter that feeds DALI from several InfiniteSampleIterator generators,
    one per (start_value, input_name, batch-mode) triple.
    """
    def to_dataset(pipeline_desc, device_str):
        dataset_pipeline, shapes, dtypes = pipeline_desc
        input_datasets = {}
        with tf.device('/cpu:0'):
            for value, name, batch in zip(start_values, input_names, batches):
                tf_type = tf.dtypes.as_dtype(value.dtype)
                shape = value.shape
                input_dataset = tf.data.Dataset.from_generator(
                    InfiniteSampleIterator, output_types=tf_type, output_shapes=shape,
                    args=(value,))
                if batch is None or batch is True:
                    input_dataset = input_dataset.batch(dataset_pipeline.max_batch_size)
                # If we place DALIDataset on GPU we need the remote call + manual data transfer
                if "gpu" in device_str:
                    input_dataset = input_dataset.apply(
                        tf.data.experimental.copy_to_device('/gpu:0'))
                if batch == "dataset":
                    input_datasets[name] = input_dataset
                else:
                    input_datasets[name] = dali_tf.experimental.Input(input_dataset, batch=batch)
        with tf.device(device_str):
            dali_dataset = dali_tf.experimental.DALIDatasetWithInputs(
                input_datasets=input_datasets,
                pipeline=dataset_pipeline,
                batch_size=dataset_pipeline.max_batch_size,
                output_shapes=shapes,
                output_dtypes=dtypes,
                num_threads=dataset_pipeline.num_threads,
                device_id=dataset_pipeline.device_id)
        return dali_dataset
    return to_dataset
@nottest
def external_source_tester_multiple(start_values, input_names, batches):
    """Return a factory producing (pipeline, output_shapes, output_dtypes) for multi-input tests."""
    def get_external_source_pipeline_getter(batch_size, num_threads, device, device_id=0,
                                            shard_id=0, num_shards=1, def_for_dataset=False):
        sources = [InfiniteSampleIterator(start_value) for start_value in start_values]
        output_shapes = [((batch_size,) + tuple(None for _ in start_value.shape))
                         for start_value in start_values]
        # Raw inputs first, then their processed counterparts (same shapes)
        output_shapes = tuple(output_shapes + output_shapes)
        output_dtypes = tuple(
            [tf.dtypes.as_dtype(start_value.dtype)
             for start_value in start_values] + [tf.int32] * len(start_values))
        pipe = many_input_pipeline(def_for_dataset, device, sources, input_names,
                                   batch_size=batch_size, num_threads=num_threads,
                                   device_id=device_id, batches=batches)
        return pipe, output_shapes, output_dtypes
    return get_external_source_pipeline_getter
|
DALI-main
|
dali/test/python/test_dali_tf_dataset_pipelines.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.fn as fn
from nose.plugins.attrib import attr
from nose2.tools import params
from numpy.random import default_rng
from nvidia.dali import pipeline_def
from nvidia.dali.tensors import TensorCPU, TensorGPU
from nose_utils import raises
from test_utils import to_array
max_batch_size = 256
max_test_value = 1e7
@pipeline_def(batch_size=max_batch_size, num_threads=1, device_id=0)
def identity_pipe(use_copy_kernel, blocking):
    """Identity pipeline with one named external_source per input framework/backend.

    Every input shares the same external_source configuration and is forwarded
    to the GPU unchanged, so `run(name=...)` inputs can be compared to outputs.
    """
    common_kwargs = dict(use_copy_kernel=use_copy_kernel, blocking=blocking,
                         cycle=False, cuda_stream=1, no_copy=True, batch=True,
                         batch_info=False, parallel=False)
    input_specs = [
        ('numpy', 'cpu'),
        ('cupy', 'gpu'),
        ('torch_cpu', 'cpu'),
        ('torch_gpu', 'gpu'),
        ('tensor_cpu', 'cpu'),
        ('tensor_gpu', 'gpu'),
        ('list_cpu', 'cpu'),
    ]
    ins = tuple(fn.external_source(name=name, device=device, **common_kwargs)
                for name, device in input_specs)
    return tuple(i.gpu() for i in ins)
@attr('torch')
@attr('cupy')
@params(
    (True, True),
    (False, True),
    (True, False),
    (False, False),
)
def test_pipeline_inputs_prefetch_queue_depth(use_copy_kernel, blocking):
    """Feed every supported input type via pipe.run(**inputs) with prefetch_queue_depth=1
    and check each comes back unchanged."""
    import torch
    import cupy as cp
    rng = default_rng()
    n_iterations = 8
    p = identity_pipe(use_copy_kernel, blocking, prefetch_queue_depth=1)
    p.build()
    for _ in range(n_iterations):
        # Vary the batch size each iteration to exercise variable-batch handling
        batch_size = rng.integers(1, max_batch_size)
        random_in = rng.random(size=(batch_size, 4, 6, 2))
        in_list_cpu = [rng.integers(low=-max_test_value, high=max_test_value, size=(5, 3, 2))
                       for _ in range(batch_size)]
        numpy, cupy, torch_cpu, torch_gpu, tensor_cpu, tensor_gpu, out_list_cpu = p.run(
            numpy=random_in,
            cupy=cp.array(random_in),
            torch_cpu=torch.Tensor(random_in),
            torch_gpu=torch.Tensor(random_in).cuda(),
            tensor_cpu=TensorCPU(random_in),
            tensor_gpu=TensorGPU(cp.array(random_in)),
            list_cpu=in_list_cpu
        )
        assert np.all(np.isclose(to_array(numpy), random_in))
        assert np.all(np.isclose(to_array(cupy), random_in))
        assert np.all(np.isclose(to_array(torch_cpu), random_in))
        assert np.all(np.isclose(to_array(torch_gpu), random_in))
        assert np.all(np.isclose(to_array(tensor_cpu), random_in))
        assert np.all(np.isclose(to_array(tensor_gpu), random_in))
        for ref, tst in zip(in_list_cpu, out_list_cpu):
            assert np.all(np.isclose(to_array(tst), ref))
@attr('torch')
@attr('cupy')
@params(
    (True, True),
    (False, True),
    (True, False),
    (False, False),
)
def test_pipeline_inputs_exec_pipelined(use_copy_kernel, blocking):
    """Same as the prefetch_queue_depth test, but with pipelined/async execution disabled."""
    import torch
    import cupy as cp
    rng = default_rng()
    n_iterations = 8
    p = identity_pipe(use_copy_kernel, blocking, exec_pipelined=False, exec_async=False)
    p.build()
    for _ in range(n_iterations):
        # Vary the batch size each iteration to exercise variable-batch handling
        batch_size = rng.integers(1, max_batch_size)
        random_in = rng.random(size=(batch_size, 4, 6, 2))
        in_list_cpu = [rng.integers(low=-max_test_value, high=max_test_value, size=(5, 3, 2))
                       for _ in range(batch_size)]
        numpy, cupy, torch_cpu, torch_gpu, tensor_cpu, tensor_gpu, out_list_cpu = p.run(
            numpy=random_in,
            cupy=cp.array(random_in),
            torch_cpu=torch.Tensor(random_in),
            torch_gpu=torch.Tensor(random_in).cuda(),
            tensor_cpu=TensorCPU(random_in),
            tensor_gpu=TensorGPU(cp.array(random_in)),
            list_cpu=in_list_cpu
        )
        assert np.all(np.isclose(to_array(numpy), random_in))
        assert np.all(np.isclose(to_array(cupy), random_in))
        assert np.all(np.isclose(to_array(torch_cpu), random_in))
        assert np.all(np.isclose(to_array(torch_gpu), random_in))
        assert np.all(np.isclose(to_array(tensor_cpu), random_in))
        assert np.all(np.isclose(to_array(tensor_gpu), random_in))
        for ref, tst in zip(in_list_cpu, out_list_cpu):
            assert np.all(np.isclose(to_array(tst), ref))
@raises(RuntimeError, glob="*`prefetch_queue_depth` in Pipeline constructor shall be set to 1*")
def test_incorrect_prefetch_queue_depth():
    """Passing inputs to run() on a pipeline with the default prefetch depth must raise."""
    p = identity_pipe(False, False)
    p.build()
    rng = default_rng()
    batch_size = rng.integers(1, max_batch_size)
    random_in = rng.random(size=(batch_size, 4, 6, 2))
    p.run(numpy=random_in)
|
DALI-main
|
dali/test/python/test_pipeline_inputs.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copyreg
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.pickling as dali_pickle
import os
from nvidia.dali import pipeline_def
from pickle import PicklingError
from nose_utils import raises
from test_utils import get_dali_extra_path, restrict_python_version
tests_dali_pickling = []
tests_dill_pickling = []
tests_cloudpickle_pickling = []
def register_case(suite):
    """Decorator factory: record the decorated test case in ``suite`` and return it unchanged."""
    def add_to_suite(case):
        suite.append(case)
        return case
    return add_to_suite
def _simple_callback(sample_info):
return np.full((5, 6), sample_info.idx_in_epoch, dtype=np.int32)
@dali_pickle.pickle_by_value
def _simple_callback_by_value(sample_info):
    # Same payload as _simple_callback, but marked for by-value (not by-reference) pickling
    return np.full((5, 6), sample_info.idx_in_epoch, dtype=np.int32)
def callback_const_42(sample_info):
    """Constant callback: a 10x20 uint8 array of 42s (``sample_info`` is ignored)."""
    return np.zeros((10, 20), dtype=np.uint8) + np.uint8(42)
def callback_const_84(sample_info):
    """Constant callback: a 10x20 uint8 array of 84s (``sample_info`` is ignored)."""
    return np.zeros((10, 20), dtype=np.uint8) + np.uint8(84)
def standard_global_callback(sample_info):
    """Return a 10x20 uint8 array filled with the sample's index within its batch."""
    fill = sample_info.idx_in_batch
    return np.ones((10, 20), dtype=np.uint8) * np.uint8(fill)
def callback_idx(i):
    """Return a 10x20 uint8 array filled with ``i``."""
    out = np.empty((10, 20), dtype=np.uint8)
    out.fill(i)
    return out
@dali_pickle.pickle_by_value
def callback_idx_by_value(i):
    # Same payload as callback_idx, but marked for by-value (not by-reference) pickling
    return np.full((10, 20), i, dtype=np.uint8)
def dumps(obj, **kwargs):
    """Custom dumps used to verify that extra kwargs reach a user-provided pickler.

    Requires ``special_dumps_param=42``; delegates serialization to DALI's pickler.
    """
    param = kwargs.get('special_dumps_param')
    if param != 42:
        raise ValueError("Expected special_dumps_param among kwargs, got {}".format(kwargs))
    return dali_pickle._DaliPickle.dumps(obj)
def loads(data, **kwargs):
    """Custom loads counterpart of ``dumps``.

    When ``special_loads_param == 84``, deserialized callback_const_84 is
    swapped for callback_const_42 so tests can detect the kwarg was delivered.
    """
    obj = dali_pickle._DaliPickle.loads(data)
    if kwargs.get('special_loads_param') != 84:
        return obj
    return callback_const_42 if obj.__name__ == 'callback_const_84' else obj
# Register dummy reducer for custom type to check if DALI pickler did not interfere with
# ability to register custom reducers for user defined types
class DummyCb:
    """Callable producing a constant int32 array; target of a custom copyreg reducer."""
    def __call__(self, sample_info):
        return np.array([1], dtype=np.int32)
class DummyCb42:
    """Callable producing a constant int32 array of 42; stand-in used by the custom reducer."""
    def __call__(self, sample_info):
        return np.array([42], dtype=np.int32)
def crazy_reducer(obj):
    # Deliberately "wrong" reducer: serializes any object as a DummyCb42 instance,
    # so tests can detect whether the copyreg-registered reducer was actually used.
    return DummyCb42().__reduce__()
copyreg.pickle(DummyCb, crazy_reducer)
# use process pid to assert that arrays were passed by value
# and are equal for externalsource run in the same and separate process
global_numpy_arrays = [np.full((10, 10), os.getpid() + i) for i in range(30)]
def create_closure_callback_numpy(shape, data_set_size):
    """Build a per-sample callback over a finite, pre-generated numpy dataset.

    Arrays are filled with the creating process' pid so tests can verify the
    closure data was serialized by value into worker processes. The callback
    raises StopIteration once ``data_set_size`` samples have been consumed.
    """
    # use process pid to assert that arrays were passed by value
    # and are equal for externalsource run in the same and separate process
    data = [np.full(shape, os.getpid()) for _ in range(data_set_size)]
    def callback(sample_info):
        idx = sample_info.idx_in_epoch
        if idx >= data_set_size:
            raise StopIteration
        return data[idx]
    return callback
def create_closure_callback_img_reader(data_set_size):
    """Build a per-sample callback reading (encoded jpeg, label) pairs from DALI_extra.

    The callback raises StopIteration after ``data_set_size`` samples; file
    indices wrap around if the dataset is smaller than ``data_set_size``.
    """
    data_root = get_dali_extra_path()
    images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
    with open(os.path.join(images_dir, "image_list.txt"), 'r') as f:
        file_label = [line.rstrip().split(' ') for line in f if line != '']
        files, labels = zip(*file_label)
    def py_file_reader(sample_info):
        if sample_info.idx_in_epoch >= data_set_size:
            raise StopIteration
        sample_idx = sample_info.idx_in_epoch % len(files)
        jpeg_filename = files[sample_idx]
        label = np.int32([labels[sample_idx]])
        with open(os.path.join(images_dir, jpeg_filename), 'rb') as f:
            # Return the raw encoded bytes; decoding happens inside the pipeline
            encoded_img = np.frombuffer(f.read(), dtype=np.uint8)
        return encoded_img, label
    return py_file_reader
def create_closure_generator_img_reader(batch_size, data_set_size):
    """Build a batch generator yielding (encoded jpegs, labels) lists from DALI_extra.

    Yields full batches only; stops once another whole batch would exceed
    ``data_set_size`` samples.
    """
    data_root = get_dali_extra_path()
    images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
    with open(os.path.join(images_dir, "image_list.txt"), 'r') as f:
        file_label = [line.rstrip().split(' ') for line in f if line != '']
        files, labels = zip(*file_label)
    def py_file_gen_reader():
        i = 0
        while i + batch_size <= data_set_size:
            batch_imgs, batch_labels = [], []
            for _ in range(batch_size):
                jpeg_filename = files[i]
                with open(os.path.join(images_dir, jpeg_filename), 'rb') as f:
                    batch_imgs.append(np.frombuffer(f.read(), dtype=np.uint8))
                batch_labels.append(np.int32([labels[i]]))
                i += 1
            yield batch_imgs, batch_labels
    return py_file_gen_reader
jpeg_file = os.path.join(
get_dali_extra_path(), 'db', 'single', 'jpeg', '510', 'ship-1083562_640.jpg')
sequence_lenght = 4
def create_callback_with_syntactically_nested_code_referencing_global_var():
    """Build a callback with deeply nested code objects that reference both a
    closure variable and a module global (``jpeg_file``).

    The contrived nesting is deliberate: it exercises by-value pickling of
    nested code objects -- do not simplify.
    """
    def cb_from_closure():
        return 0
    def get_data(sample_info):
        def nested_in_cb():
            def super_nested():
                return np.fromfile(jpeg_file, dtype=np.uint8) + cb_from_closure()
            return super_nested
        return nested_in_cb()()
    return get_data
def create_callback_with_list_comprehension_referencing_global_var():
    """Build a callback whose nested list comprehension references module globals
    (``jpeg_file``, ``sequence_lenght``).

    The comprehension is deliberate: it exercises by-value pickling of
    comprehension code objects -- do not simplify.
    """
    def get_data(sample_info):
        return [x for xs in
                [[np.fromfile(jpeg_file, dtype=np.uint8) for _ in range(sequence_lenght)]
                 for i in range(2)] for x in xs]
    return get_data
def create_simple_pipeline(callback, py_callback_pickler, batch_size, parallel=True,
                           py_num_workers=None, py_start_method="spawn"):
    """Build a pipeline with a single per-sample external source fed by ``callback``.

    ``py_callback_pickler`` (if not None) overrides how the callback is
    serialized for worker processes; ``parallel`` toggles the parallel
    external source path.
    """
    extra = {}
    if parallel:
        extra["py_num_workers"] = py_num_workers
        extra["py_start_method"] = py_start_method
    if py_callback_pickler is not None:
        extra['py_callback_pickler'] = py_callback_pickler
    @pipeline_def(batch_size=batch_size, num_threads=2, device_id=0, **extra)
    def create_pipline():
        outputs = fn.external_source(source=callback, batch=False, parallel=parallel)
        return outputs
    return create_pipline()
def create_stacking_pipeline(callback, py_callback_pickler, batch_size, parallel=True,
                             py_num_workers=None, py_start_method="spawn"):
    """Build a pipeline that decodes 2 * sequence_lenght jpegs from ``callback``
    and stacks them into a single DHWC sequence."""
    kwargs = {}
    if parallel:
        kwargs.update(py_num_workers=py_num_workers, py_start_method=py_start_method)
    if py_callback_pickler is not None:
        kwargs["py_callback_pickler"] = py_callback_pickler

    @pipeline_def(batch_size=batch_size, num_threads=2, device_id=0, **kwargs)
    def stacking_pipeline():
        encoded = fn.external_source(source=callback, num_outputs=sequence_lenght * 2,
                                     parallel=parallel, batch=False)
        decoded = fn.decoders.image(encoded, device="cpu")
        stacked = fn.stack(*decoded)
        return fn.reshape(stacked, layout="DHWC")

    return stacking_pipeline()
def create_decoding_pipeline(callback, py_callback_pickler, batch_size, parallel=True,
                             py_num_workers=None, py_start_method="spawn", batch=False):
    """Build a pipeline that takes (encoded jpeg, label) pairs from ``callback``
    and returns (decoded image, label)."""
    kwargs = {}
    if parallel:
        kwargs.update(py_num_workers=py_num_workers, py_start_method=py_start_method)
    if py_callback_pickler is not None:
        kwargs["py_callback_pickler"] = py_callback_pickler

    @pipeline_def(batch_size=batch_size, num_threads=2, device_id=0, **kwargs)
    def decoding_pipeline():
        encoded, labels = fn.external_source(source=callback, num_outputs=2,
                                             batch=batch, parallel=parallel)
        decoded = fn.decoders.image(encoded, device="cpu")
        return decoded, labels

    return decoding_pipeline()
def _run_and_compare_outputs(batch_size, parallel_pipeline, serial_pipeline):
    """Run both pipelines once and assert their outputs are identical sample by sample."""
    par_outs = parallel_pipeline.run()
    ser_outs = serial_pipeline.run()
    for par_out, ser_out in zip(par_outs, ser_outs):
        assert len(par_out) == batch_size
        assert len(ser_out) == batch_size
        for idx in range(batch_size):
            assert np.array_equal(par_out[idx], ser_out[idx])
def _build_and_compare_pipelines_epochs(epochs_num, batch_size, parallel_pipeline, serial_pipeline):
    """Iterate both pipelines for ``epochs_num`` epochs, comparing outputs until each
    epoch ends with StopIteration, then reset and continue."""
    parallel_pipeline.build()
    serial_pipeline.build()
    # sanity check: only the parallel pipeline should own a Python worker pool
    assert parallel_pipeline._py_pool is not None
    assert serial_pipeline._py_pool is None
    for _ in range(epochs_num):
        while True:
            try:
                _run_and_compare_outputs(batch_size, parallel_pipeline, serial_pipeline)
            except StopIteration:
                parallel_pipeline.reset()
                serial_pipeline.reset()
                break
def _create_and_compare_simple_pipelines(cb, py_callback_pickler, batch_size, py_num_workers=2,
                                         py_start_method="spawn"):
    """Build parallel and serial variants of the simple pipeline around ``cb``
    and compare their outputs over three iterations."""
    par_pipe = create_simple_pipeline(
        cb, py_callback_pickler, batch_size=batch_size, py_num_workers=py_num_workers,
        py_start_method=py_start_method, parallel=True)
    ser_pipe = create_simple_pipeline(cb, None, batch_size=batch_size, parallel=False)
    par_pipe.build()
    ser_pipe.build()
    for _ in range(3):
        _run_and_compare_outputs(batch_size, par_pipe, ser_pipe)
# It uses fork method to start so need to be run as the first test
def test_no_pickling_in_forking_mode():
    """With py_start_method="fork" the callback is inherited, not pickled, so even a
    callback that could not be pickled by reference must work."""
    # modify callback name so that an attempt to pickle it in spawn mode would fail
    _simple_callback.__name__ = _simple_callback.__qualname__ = "simple_callback"
    _create_and_compare_simple_pipelines(_simple_callback, None, batch_size=8,
                                         py_num_workers=2, py_start_method="fork")
# Run this one as sanity check that standard serialization is not broken by the change
def test_standard_global_function_serialization():
    """A plain module-level function must round-trip with default (by-reference) pickling."""
    _create_and_compare_simple_pipelines(standard_global_callback, None, batch_size=4,
                                         py_num_workers=2)
def test_if_custom_type_reducers_are_respected_by_dali_reducer():
    """Custom __reduce__-style reducers on the callback type must survive DALI's pickler."""
    batch_size = 8
    pipe = create_simple_pipeline(DummyCb(), None, batch_size=batch_size,
                                  py_num_workers=2, parallel=True)
    pipe.build()
    (batch,) = pipe.run()
    assert len(batch) == batch_size
    expected = np.int32([42])
    for idx in range(batch_size):
        assert np.array_equal(batch[idx], expected)
@register_case(tests_dali_pickling)
@raises(PicklingError, "Can't pickle * attribute lookup simple_callback on * failed")
def _test_global_function_pickled_by_reference(name, py_callback_pickler):
    """Pickling by reference must fail when the function's qualname no longer matches
    any module attribute."""
    # modify callback name so that an attempt to pickle by reference,
    # which is default Python behavior, fails
    _simple_callback.__name__ = _simple_callback.__qualname__ = "simple_callback"
    _create_and_compare_simple_pipelines(_simple_callback, py_callback_pickler, batch_size=4,
                                         py_num_workers=2)
@register_case(tests_dali_pickling)
def _test_pickle_by_value_decorator_on_global_function(name, py_callback_pickler):
    """A callback marked for by-value pickling must serialize even when
    by-reference lookup would fail."""
    # modify callback name so that an attempt to pickle by reference,
    # which is default Python behavior, would fail
    _simple_callback_by_value.__name__ = _simple_callback_by_value.__qualname__ = "simple_callback_by_value" # noqa: E501
    _create_and_compare_simple_pipelines(_simple_callback_by_value, py_callback_pickler,
                                         batch_size=4, py_num_workers=2)
@register_case(tests_dali_pickling)
@raises(ValueError, "Expected special_dumps_param among kwargs, got *")
def _test_pickle_does_not_pass_extra_params_function(name, py_callback_pickler):
    """Passing a bare module as the pickler must NOT forward any extra kwargs to
    its dumps — the module's dumps requires one and therefore raises."""
    this_module = __import__(__name__)
    _create_and_compare_simple_pipelines(callback_const_42, this_module, batch_size=4,
                                         py_num_workers=2)
@register_case(tests_dali_pickling)
def _test_pickle_passes_extra_dumps_params_function(name, py_callback_pickler):
    """A (module, dumps_kwargs) pair must forward the kwargs to the module's dumps."""
    this_module = __import__(__name__)
    _create_and_compare_simple_pipelines(callback_const_42,
                                         (this_module, {'special_dumps_param': 42}), batch_size=4,
                                         py_num_workers=2)
@register_case(tests_dali_pickling)
def _test_pickle_passes_extra_dumps_loads_params_function(name, py_callback_pickler):
    """A (module, dumps_kwargs, loads_kwargs) triple must forward the loads kwargs
    to the deserializing side — verified by the module swapping the callback."""
    this_module = __import__(__name__)
    batch_size = 4
    # this_module.loads replaces callback_const_84 to callback_const_42
    # iff it receives special_loads_param
    parallel_pipeline = create_simple_pipeline(
        callback_const_84, (this_module, {'special_dumps_param': 42}, {'special_loads_param': 84}),
        batch_size=batch_size, py_num_workers=2, parallel=True)
    serial_pipeline = create_simple_pipeline(
        callback_const_42, None, batch_size=batch_size, parallel=False)
    parallel_pipeline.build()
    serial_pipeline.build()
    for _ in range(3):
        _run_and_compare_outputs(batch_size, parallel_pipeline, serial_pipeline)
@register_case(tests_dali_pickling)
def _test_global_function_wrapped_in_lambda_by_value(name, py_callback_pickler):
    """A lambda wrapping a by-value-pickled global must serialize even after the
    global's qualname is scrambled."""
    # modify callback name so that an attempt to pickle by reference,
    # which is default Python behavior, would fail
    callback_idx_by_value.__name__ = callback_idx_by_value.__qualname__ = "_scrambled_name"
    _create_and_compare_simple_pipelines(lambda x: callback_idx_by_value(x.idx_in_epoch),
                                         py_callback_pickler, batch_size=8, py_num_workers=2)
@register_case(tests_dali_pickling)
@register_case(tests_dill_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_lambda_np_full(name, py_callback_pickler):
    """A plain lambda producing numpy arrays must serialize with every pickler."""
    _create_and_compare_simple_pipelines(lambda x: np.full((100, 100), x.idx_in_epoch),
                                         py_callback_pickler, batch_size=8, py_num_workers=2)
@register_case(tests_dali_pickling)
@register_case(tests_dill_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_lambda_np_readfromfile(name, py_callback_pickler):
    """A lambda closing over file lists read from DALI_EXTRA must serialize."""
    data_root = get_dali_extra_path()
    images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
    with open(os.path.join(images_dir, "image_list.txt"), 'r') as f:
        file_label = [line.rstrip().split(' ') for line in f if line != '']
    files, _ = zip(*file_label)
    _create_and_compare_simple_pipelines(
        lambda x: (np.fromfile(os.path.join(images_dir, files[x.idx_in_epoch % len(files)]),
                               dtype=np.uint8)),
        py_callback_pickler, batch_size=8, py_num_workers=2)
@register_case(tests_dali_pickling)
@register_case(tests_dill_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_serialization_of_globals_from_code_nested_in_cb(name, py_callback_pickler):
    """Globals referenced only from code nested inside the callback must be captured."""
    _create_and_compare_simple_pipelines(
        create_callback_with_syntactically_nested_code_referencing_global_var(),
        py_callback_pickler, batch_size=8, py_num_workers=2)
@register_case(tests_dali_pickling)
@register_case(tests_dill_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_serialization_of_globals_referenced_in_list_comprehension_in_cb(name,
                                                                          py_callback_pickler):
    """Globals referenced only inside a comprehension in the callback must be captured."""
    batch_size = 10
    parallel_pipeline = create_stacking_pipeline(
        create_callback_with_list_comprehension_referencing_global_var(),
        py_callback_pickler, batch_size=batch_size, py_num_workers=2, parallel=True)
    serial_pipeline = create_stacking_pipeline(
        create_callback_with_list_comprehension_referencing_global_var(),
        None, batch_size=batch_size, parallel=False)
    parallel_pipeline.build()
    serial_pipeline.build()
    for _ in range(3):
        _run_and_compare_outputs(batch_size, parallel_pipeline, serial_pipeline)
@register_case(tests_dali_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_mutually_recursive_functions(name, py_callback_pickler):
    """A mutually recursive local def + lambda pair must serialize as a closure pair."""
    def div_by_2(n, acc=0): return acc if n <= 0 else add_one(n // 2, acc)
    add_one = lambda n, acc: div_by_2(n, acc + 1)  # noqa: E731
    _create_and_compare_simple_pipelines(
        lambda x: np.int32([div_by_2(x.idx_in_epoch)]),
        py_callback_pickler, batch_size=15, py_num_workers=2)
@register_case(tests_dali_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_builtin_functions_usage_in_cb(name, py_callback_pickler):
    """Builtins (len, dir) used inside the callback must resolve after deserialization."""
    def div_by_2(n, acc=0): return acc if n <= 0 else add_one(n // 2, acc)
    add_one = lambda n, acc: div_by_2(n, acc + 1)  # noqa: E731
    _create_and_compare_simple_pipelines(
        lambda x: np.int32([div_by_2(x.idx_in_epoch)]) + len(dir(np)),
        py_callback_pickler, batch_size=15, py_num_workers=2)
@register_case(tests_dali_pickling)
@register_case(tests_dill_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_module_dependency(name, py_callback_pickler):
    """A lambda using a locally imported helper module (qualified call) must serialize."""
    import import_module_test_helper
    _create_and_compare_simple_pipelines(
        lambda x: import_module_test_helper.cb(x),
        py_callback_pickler, batch_size=15, py_num_workers=2)
@register_case(tests_dali_pickling)
@register_case(tests_dill_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_module_dependency_unqualified(name, py_callback_pickler):
    """A lambda using a from-imported helper (unqualified call) must serialize."""
    from import_module_test_helper import cb
    _create_and_compare_simple_pipelines(
        lambda x: cb(x),
        py_callback_pickler, batch_size=15, py_num_workers=2)
@register_case(tests_dali_pickling)
@register_case(tests_dill_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_module_dependency_by_reference(name, py_callback_pickler):
    """A from-imported function passed directly as the callback must serialize."""
    from import_module_test_helper import cb
    _create_and_compare_simple_pipelines(
        cb, py_callback_pickler, batch_size=15, py_num_workers=2)
@register_case(tests_dali_pickling)
@register_case(tests_dill_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_accessing_global_np_list(name, py_callback_pickler):
    """A lambda indexing into a module-level list of numpy arrays must serialize."""
    _create_and_compare_simple_pipelines(
        lambda x: global_numpy_arrays[x.idx_in_epoch % len(global_numpy_arrays)],
        py_callback_pickler,
        batch_size=9, py_num_workers=2)
def __test_numpy_closure(shape, py_callback_pickler):
    """Compare parallel vs serial pipelines fed by a numpy closure of a given shape."""
    batch_size, epochs_num = 8, 3
    np_cb = create_closure_callback_numpy(shape, data_set_size=epochs_num * batch_size)
    par_pipe = create_simple_pipeline(np_cb, py_callback_pickler, batch_size=batch_size,
                                      py_num_workers=2, parallel=True)
    ser_pipe = create_simple_pipeline(np_cb, None, batch_size=batch_size, parallel=False)
    _build_and_compare_pipelines_epochs(epochs_num, batch_size, par_pipe, ser_pipe)
@register_case(tests_dali_pickling)
@register_case(tests_dill_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_numpy_closure(name, py_callback_pickler):
    """Yield numpy-closure sub-cases for scalar and 3D array shapes."""
    for arr_shape in (tuple(), (5, 5, 5,)):
        yield __test_numpy_closure, arr_shape, py_callback_pickler
@register_case(tests_dali_pickling)
@register_case(tests_dill_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_reader_closure(name, py_callback_pickler):
    """Compare parallel vs serial decoding pipelines driven by a per-sample reader closure."""
    batch_size, batches_in_epoch, epochs_num = 7, 3, 3
    reader_cb = create_closure_callback_img_reader(data_set_size=batches_in_epoch * batch_size)
    par_pipe = create_decoding_pipeline(reader_cb, py_callback_pickler, batch_size=batch_size,
                                        py_num_workers=2, parallel=True)
    ser_pipe = create_decoding_pipeline(reader_cb, None, batch_size=batch_size, parallel=False)
    _build_and_compare_pipelines_epochs(epochs_num, batch_size, par_pipe, ser_pipe)
@register_case(tests_dali_pickling)
@register_case(tests_dill_pickling)
@register_case(tests_cloudpickle_pickling)
def _test_generator_closure(name, py_callback_pickler):
    """Compare parallel vs serial decoding pipelines driven by a batch generator closure."""
    batch_size, batches_in_epoch, epochs_num = 7, 3, 3
    gen_cb = create_closure_generator_img_reader(
        batch_size=batch_size, data_set_size=batches_in_epoch * batch_size)
    par_pipe = create_decoding_pipeline(gen_cb, py_callback_pickler, batch_size=batch_size,
                                        py_num_workers=1, parallel=True, batch=True)
    ser_pipe = create_decoding_pipeline(gen_cb, None, batch_size=batch_size,
                                        parallel=False, batch=True)
    _build_and_compare_pipelines_epochs(epochs_num, batch_size, par_pipe, ser_pipe)
@restrict_python_version(3, 8)
def test_dali_pickling():
    """Run all registered cases with DALI's own pickler (needs Python >= 3.8)."""
    for i, test in enumerate(tests_dali_pickling, start=1):
        yield test, "{}. {}".format(i, test.__name__.strip('_')), None
def test_cloudpickle_pickling():
    """Run all registered cases with cloudpickle as the callback pickler."""
    import cloudpickle
    for i, test in enumerate(tests_cloudpickle_pickling, start=1):
        yield test, "{}. {}".format(i, test.__name__.strip('_')), cloudpickle
def test_dill_pickling():
    """Run all registered cases with dill (recurse=True) as the callback pickler."""
    import dill
    for i, test in enumerate(tests_dill_pickling, start=1):
        yield test, "{}. {}".format(i, test.__name__.strip('_')), (dill, {'recurse': True})
|
DALI-main
|
dali/test/python/test_external_source_parallel_custom_serialization.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.plugins.attrib import attr
# it is enough to just import all functions from test_internals_operator_external_source
# nose will query for the methods available and will run them
# the test_internals_operator_external_source is 99% the same for cupy and numpy tests
# so it is better to store everything in one file and just call `use_cupy` to
# switch between the default numpy and cupy
from test_external_source_impl import * # noqa:F403, F401
from test_external_source_impl import use_cupy
from test_utils import check_output, check_output_pattern
import nvidia.dali
from nvidia.dali import Pipeline, pipeline_def
import nvidia.dali.fn as fn
from nvidia.dali.tensors import TensorGPU
import numpy as np
use_cupy()  # switch the shared external-source test suite to CuPy arrays
# extra tests, GPU-specific
import cupy as cp  # noqa:E402 - we need to call this after use_cupy()
# sanity check that DALI recognizes CuPy arrays before running any GPU cases
assert nvidia.dali.types._is_cupy_array(cp.array([1, 2, 3])), "CuPy array not recognized"
def test_external_source_with_iter_cupy_stream():
    """Feed CuPy arrays created on a non-default, non-blocking CUDA stream and check
    that the pipeline still receives the correct values."""
    with cp.cuda.Stream(non_blocking=True):
        for attempt in range(10):
            pipe = Pipeline(1, 3, 0)
            # closure over `attempt`; `i` is the iteration index passed by DALI
            def get_data(i):
                return [cp.array([attempt * 100 + i * 10 + 1.5], dtype=cp.float32)]
            pipe.set_outputs(fn.external_source(get_data))
            pipe.build()
            for i in range(10):
                check_output(pipe.run(),
                             [np.array([attempt * 100 + i * 10 + 1.5], dtype=np.float32)])
def test_external_source_mixed_contiguous():
    """Alternating contiguous and non-contiguous no-copy batches must trigger the
    expected warning from the external source operator."""
    batch_size = 2
    iterations = 4

    def make_batch(i):
        value = 100 + i * 10 + 1.5
        if i % 2:
            # whole batch in one contiguous array
            return cp.array([[value]] * batch_size, dtype=cp.float32)
        # list of per-sample arrays (non-contiguous batch)
        return batch_size * [cp.array([value], dtype=cp.float32)]

    pipe = Pipeline(batch_size, 3, 0)
    pipe.set_outputs(fn.external_source(device="gpu", source=make_batch, no_copy=True))
    pipe.build()
    pattern = "ExternalSource operator should not mix contiguous and noncontiguous inputs. " \
              "In such a case the internal memory used to gather data in a contiguous chunk of " \
              "memory would be trashed."
    with check_output_pattern(pattern):
        for _ in range(iterations):
            pipe.run()
def _test_cross_device(src, dst, use_dali_tensor=False):
    """Feed data allocated on CUDA device ``src`` into a pipeline bound to device ``dst``
    and check it arrives intact."""
    # The use_dali_tensor converts (via the Dlpack) to the DALI native Tensor before feeding the
    # data, to additionaly check if the constructor works correctly wrt to device_id.
    # TODO(klecki): [device_id] currently the device_id is not exposed in Python Tensors, so there
    # is no other way we may verify it.
    import nvidia.dali.fn as fn
    import numpy as np
    pipe = Pipeline(1, 3, dst)
    iter = 0
    def get_data():
        nonlocal iter
        with cp.cuda.Device(src):
            data = cp.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=cp.float32) + iter
            iter += 1
        if use_dali_tensor:
            return TensorGPU(data.toDlpack())
        return data
    with pipe:
        pipe.set_outputs(fn.external_source(get_data, batch=False, device='gpu'))
    pipe.build()
    for i in range(10):
        out, = pipe.run()
        assert np.array_equal(np.array(out[0].as_cpu()), np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + i)
@attr('multigpu')
def test_cross_device():
    """Yield all src/dst device combinations of the cross-device test (2+ GPUs only)."""
    if cp.cuda.runtime.getDeviceCount() < 2:
        return
    for src_dev in (0, 1):
        for dst_dev in (0, 1):
            for with_dali_tensor in (True, False):
                yield _test_cross_device, src_dev, dst_dev, with_dali_tensor
def _test_memory_consumption(device, test_case):
    """Smoke test: repeatedly run large (1024x1024x4 int32) batches through external_source
    in the given copy/no-copy and sample/batch mode; the exact closure shapes below are
    part of what is being exercised."""
    batch_size = 32
    num_iters = 128
    if device == "cpu":
        import numpy as np
        fw = np
    else:
        fw = cp
    def no_copy_sample():
        # the batch is pre-allocated once and handed out without copying (no_copy=True)
        batch = [fw.full((1024, 1024, 4), i, dtype=fw.int32) for i in range(batch_size)]
        def cb(sample_info):
            return batch[sample_info.idx_in_batch]
        return cb
    def copy_sample():
        def cb(sample_info):
            return fw.full((1024, 1024, 4), sample_info.idx_in_batch, dtype=fw.int32)
        return cb
    def copy_batch():
        def cb():
            return fw.full((batch_size, 1024, 1024, 4), 42, dtype=fw.int32)
        return cb
    # test_case -> (callback factory, no_copy flag, batch-mode flag)
    cases = {
        'no_copy_sample': (no_copy_sample, True, False),
        'copy_sample': (copy_sample, False, False),
        'copy_batch': (copy_batch, False, True)
    }
    cb, no_copy, batch_mode = cases[test_case]
    @pipeline_def
    def pipeline():
        return fn.external_source(source=cb(), device=device, batch=batch_mode, no_copy=no_copy)
    pipe = pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    pipe.build()
    for _ in range(num_iters):
        pipe.run()
def test_memory_consumption():
    """Yield every device / copy-mode combination of the memory consumption test."""
    for dev in ("cpu", "gpu"):
        for case in ("no_copy_sample", "copy_sample", "copy_batch"):
            yield _test_memory_consumption, dev, case
|
DALI-main
|
dali/test/python/test_external_source_cupy.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# it is enough to just import all functions from test_internals_operator_external_source
# nose will query for the methods available and will run them
# the test_internals_operator_external_source is 99% the same for cupy and numpy tests
# so it is better to store everything in one file and just call
# `use_cupy` to switch between the default numpy and cupy
import numpy as np
import torch
import test_external_source_parallel_utils as utils
from nose_utils import raises
class ExtCallbackTorch(utils.ExtCallback):
    """ExtCallback variant that returns torch CPU tensors instead of numpy arrays."""
    def __call__(self, sample_info):
        sample = super().__call__(sample_info)
        return torch.tensor(sample)
@raises(RuntimeError, "Error*starting Python worker threads for*parallel External Source*"
        "Cannot fork*CUDA has been initialized*"
        "*start_py_workers*fork*spawn*")
def test_pytorch_cuda_context():
    """Forking worker processes after the CUDA context was acquired must be rejected
    with a helpful error suggesting start_py_workers / spawn."""
    # Create a dummy torch CUDA tensor so we acquire CUDA context
    cuda0 = torch.device('cuda:0')
    _ = torch.ones([1, 1], dtype=torch.float32, device=cuda0)
    callback = utils.ExtCallback((4, 5), 10, np.int32)
    pipe = utils.create_pipe(callback, 'cpu', 5, py_num_workers=6, py_start_method='fork',
                             parallel=True)
    pipe.start_py_workers()
def test_pytorch():
    """Run the spawn-mode parallel external source suite with torch CPU tensors."""
    for case in utils.check_spawn_with_callback(ExtCallbackTorch):
        yield case
class ExtCallbackTorchCuda(utils.ExtCallback):
    """ExtCallback variant returning torch GPU tensors — used to test the error path,
    since GPU tensors are not supported from worker callbacks."""
    def __call__(self, sample_info):
        sample = super().__call__(sample_info)
        return torch.tensor(sample, device=torch.device('cuda:0'))
@raises(Exception, "Exception traceback received from worker thread*"
        "TypeError: Unsupported callback return type. GPU tensors*not supported*"
        "Got*PyTorch GPU tensor")
def test_pytorch_cuda():
    """A worker callback returning a GPU tensor must surface a TypeError from the worker."""
    callback = ExtCallbackTorchCuda((4, 5), 10, np.int32)
    pipe = utils.create_pipe(callback, 'cpu', 5, py_num_workers=6, py_start_method='spawn',
                             parallel=True)
    utils.build_and_run_pipeline(pipe)
|
DALI-main
|
dali/test/python/test_external_source_parallel_pytorch.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# it is enough to just import all functions from test_internals_operator_external_source
# nose will query for the methods available and will run them
# the test_internals_operator_external_source is 99% the same for cupy and numpy tests
# so it is better to store everything in one file and just call `use_cupy`
# to switch between the default numpy and cupy
from nose.plugins.attrib import attr
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.fn as fn
from test_utils import check_output
import torch
from test_external_source_impl import use_torch
import numpy as np
use_torch(True)
# extra tests, GPU-specific
def test_external_source_callback_torch_stream():
    """Feed torch CUDA tensors mutated on a non-default torch stream and check the
    pipeline observes the updated values each iteration."""
    with torch.cuda.stream(torch.cuda.Stream()):
        for attempt in range(10):
            t0 = torch.tensor([attempt * 100 + 1.5], dtype=torch.float32).cuda()
            increment = torch.tensor([10], dtype=torch.float32).cuda()
            pipe = Pipeline(1, 3, 0)
            # in-place increment before every batch; pipeline must see the new value
            def gen_batch():
                nonlocal t0
                t0 += increment
                return [t0]
            pipe.set_outputs(fn.external_source(gen_batch))
            pipe.build()
            for i in range(10):
                check_output(pipe.run(), [np.array(
                    [attempt * 100 + (i + 1) * 10 + 1.5], dtype=np.float32)])
def _test_cross_device(src, dst):
    """Feed torch CUDA tensors allocated on device ``src`` into a pipeline bound to
    device ``dst`` and check the data arrives intact.

    Cleanups: removed the redundant function-local imports of ``fn`` and ``np``
    (both are imported at module level) and renamed the counter from ``iter``,
    which shadowed the builtin.
    """
    pipe = Pipeline(1, 3, dst)
    counter = 0

    def get_data():
        nonlocal counter
        data = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]],
                            dtype=torch.float32).cuda(device=src) + counter
        counter += 1
        return data

    with pipe:
        pipe.set_outputs(fn.external_source(get_data, batch=False, device='gpu'))
    pipe.build()
    for i in range(10):
        out, = pipe.run()
        assert np.array_equal(np.array(out[0].as_cpu()), np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + i)
@attr('multigpu')
def test_cross_device():
    """Yield all src/dst device combinations of the cross-device test (2+ GPUs only)."""
    if torch.cuda.device_count() < 2:
        return
    for src_dev in (0, 1):
        for dst_dev in (0, 1):
            yield _test_cross_device, src_dev, dst_dev
|
DALI-main
|
dali/test/python/test_external_source_pytorch_gpu.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import numpy as np
from nvidia.dali import Pipeline, pipeline_def
from test_utils import check_batch
from nose_utils import raises, assert_warns, assert_raises
from nvidia.dali.types import DALIDataType
from numpy.random import default_rng
def build_src_pipe(device, layout=None):
    """Build a cyclic external-source pipeline producing two ragged 2D float32 batches.

    Returns (pipeline, batch_size)."""
    batches = [
        [
            np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32),
            np.array([[10, 20], [30, 40], [50, 60]], dtype=np.float32)
        ],
        [
            np.array([[9, 10], [11, 12]], dtype=np.float32),
            np.array([[100, 200, 300, 400, 500]], dtype=np.float32)
        ]
    ]
    pipe = Pipeline(len(batches), 1, 0)
    src = fn.external_source(source=batches, device=device, cycle=True,
                             layout=layout if layout is not None else "XY")
    pipe.set_outputs(src)
    pipe.build()
    return pipe, len(batches)
def _test_feed_input(device, is_serialized):
    """Feed batches from a source pipeline into a named external-source input of a
    destination pipeline (optionally after a serialize/deserialize round-trip) and
    check the data comes out unchanged.

    Cleanup: the duplicated ``dst_pipe.build()`` call present in both branches was
    hoisted after the conditional.
    """
    src_pipe, batch_size = build_src_pipe(device)
    dst_pipe = Pipeline(batch_size, 1, 0, exec_async=False, exec_pipelined=False)
    dst_pipe.set_outputs(fn.external_source(name="ext", device=device))
    if is_serialized:
        # round-trip through serialization to verify the named input survives it
        serialized = dst_pipe.serialize()
        dst_pipe = Pipeline.deserialize(serialized_pipeline=serialized, batch_size=batch_size,
                                        num_threads=1, device_id=0, exec_async=False,
                                        exec_pipelined=False)
    dst_pipe.build()
    for _ in range(3):
        out1 = src_pipe.run()
        dst_pipe.feed_input("ext", out1[0])
        out2 = dst_pipe.run()
        check_batch(out2[0], out1[0], batch_size, 0, 0, "XY")
def test_feed_input():
    """Yield feed_input checks for both devices, with and without serialization."""
    for device in ("cpu", "gpu"):
        for is_serialized in (True, False):
            yield _test_feed_input, device, is_serialized
def _test_callback(device, as_tensors, change_layout_to=None):
    """Check an external source fed from a callback that pulls batches out of another
    pipeline, optionally re-exposing them as individual tensors / with a new layout.

    Cleanups: the second ``build_src_pipe`` unpack no longer clobbers ``batch_size``
    (both pipelines use the same batch size anyway) and the unused loop variable no
    longer shadows the builtin ``iter``.
    """
    src_pipe, batch_size = build_src_pipe(device)
    ref_pipe, _ = build_src_pipe(device, layout=change_layout_to)
    dst_pipe = Pipeline(batch_size, 1, 0)

    def get_from_src():
        tl = src_pipe.run()[0]
        return [tl[i] for i in range(len(tl))] if as_tensors else tl

    outs = fn.external_source(source=get_from_src, device=device, layout=change_layout_to)
    dst_pipe.set_outputs(outs)
    dst_pipe.build()
    for _ in range(3):
        ref = ref_pipe.run()
        out = dst_pipe.run()
        check_batch(out[0], ref[0], batch_size, 0, 0)
def test_callback():
    """Yield callback-source checks over device, tensor-list shape and layout change."""
    for device in ("cpu", "gpu"):
        for as_tensors in (False, True):
            for change_layout in (None, "AB"):
                yield _test_callback, device, as_tensors, change_layout
def _test_scalar(device, as_tensors):
    """Test propagation of scalars from external source.

    Cleanup: the loop variable no longer shadows the builtin ``iter``.
    NOTE(review): in the source lambda the comprehension variable ``i`` shadows the
    lambda's iteration argument, so the fed values are constant across iterations —
    presumably intentional (the check only compares src vs dst); kept as-is.
    """
    batch_size = 4
    src_pipe = Pipeline(batch_size, 1, 0)
    src_ext = fn.external_source(
        source=lambda i: [np.float32(i * 10 + i + 1) for i in range(batch_size)], device=device)
    src_pipe.set_outputs(src_ext)
    src_pipe.build()
    dst_pipe = Pipeline(batch_size, 1, 0, exec_async=False, exec_pipelined=False)
    dst_pipe.set_outputs(fn.external_source(name="ext", device=device))
    dst_pipe.build()
    for _ in range(3):
        src = src_pipe.run()
        data = src[0]
        if as_tensors:
            data = [data[i] for i in range(len(data))]
        dst_pipe.feed_input("ext", data)
        dst = dst_pipe.run()
        check_batch(src[0], dst[0], batch_size, 0, 0, "")
def test_scalar():
    """Yield scalar-propagation checks for both devices and both feed shapes."""
    for device in ("cpu", "gpu"):
        for as_tensors in (False, True):
            yield _test_scalar, device, as_tensors
class BatchCb:
    """Batch-mode external_source callback used to test the ``batch_info`` flag.

    Depending on ``batch_info`` it expects either a BatchInfo object or a plain
    iteration index, and returns a batch of [iteration, epoch_idx] int32 arrays
    (epoch_idx is -1 when unavailable). Raises StopIteration after ``epoch_size``
    iterations.
    """
    def __init__(self, batch_info, batch_size, epoch_size):
        self.batch_info = batch_info
        self.batch_size = batch_size
        self.epoch_size = epoch_size

    def __call__(self, arg):
        if not self.batch_info:
            assert isinstance(arg, int), "Expected integer as cb argument, got {}".format(arg)
            iteration, epoch_idx = arg, -1
        else:
            assert isinstance(arg, types.BatchInfo), \
                f"Expected BatchInfo instance as cb argument, got {arg}"
            iteration, epoch_idx = arg.iteration, arg.epoch_idx
        if iteration >= self.epoch_size:
            raise StopIteration
        return [np.array([iteration, epoch_idx], dtype=np.int32)
                for _ in range(self.batch_size)]
class SampleCb:
    """Per-sample external_source callback that records the full SampleInfo contents
    as an int32 array; raises StopIteration after ``epoch_size`` iterations."""
    def __init__(self, batch_size, epoch_size):
        self.batch_size = batch_size
        self.epoch_size = epoch_size

    def __call__(self, sample_info):
        if sample_info.iteration >= self.epoch_size:
            raise StopIteration
        fields = (sample_info.idx_in_epoch, sample_info.idx_in_batch,
                  sample_info.iteration, sample_info.epoch_idx)
        return np.array(fields, dtype=np.int32)
def _test_batch_info_flag_default(cb, batch_size):
    """Run one iteration of a pipeline sourced from ``cb`` with default batch_info."""
    pipe = Pipeline(batch_size, 1, 0)
    with pipe:
        pipe.set_outputs(fn.external_source(source=cb))
    pipe.build()
    pipe.run()
def test_batch_info_flag_default():
    """By default batch_info=False: the callback receives a plain iteration index, so a
    callback expecting BatchInfo must fail its isinstance assertion."""
    batch_size = 5
    cb_int = BatchCb(False, batch_size, 1)
    yield _test_batch_info_flag_default, cb_int, batch_size
    cb_batch_info = BatchCb(True, batch_size, 1)
    # wrap the runner in raises(...) - the BatchInfo-expecting callback must assert
    yield raises(AssertionError, "Expected BatchInfo instance as cb argument")(
        _test_batch_info_flag_default), cb_batch_info, batch_size
def _test_epoch_idx(batch_size, epoch_size, cb, batch_info, batch_mode):
    """Check that the iteration/epoch indices reported to the source callback match the
    actual pipeline progress over several epochs (including reset() after each one)."""
    num_epochs = 3
    pipe = Pipeline(batch_size, 1, 0)
    with pipe:
        ext = fn.external_source(source=cb, batch_info=batch_info, batch=batch_mode)
        pipe.set_outputs(ext)
    pipe.build()
    for epoch_idx in range(num_epochs):
        for iteration in range(epoch_size):
            (batch,) = pipe.run()
            assert len(batch) == batch_size
            for sample_i, sample in enumerate(batch):
                if batch_mode:
                    # BatchCb emits [iteration, epoch_idx], -1 when epoch_idx unavailable
                    expected = np.array([iteration, epoch_idx if batch_info else -1])
                else:
                    # SampleCb emits the full SampleInfo contents
                    expected = np.array([
                        iteration * batch_size + sample_i,
                        sample_i, iteration, epoch_idx])
                np.testing.assert_array_equal(sample, expected)
        # the callback must signal end of epoch exactly after epoch_size iterations
        try:
            pipe.run()
        except StopIteration:
            pipe.reset()
        else:
            assert False, "expected StopIteration"
def test_epoch_idx():
    """Yield epoch-index checks for batch callbacks (with/without BatchInfo) and a
    per-sample callback."""
    batch_size, epoch_size = 3, 4
    for batch_info in (True, False):
        cb = BatchCb(batch_info, batch_size, epoch_size)
        yield _test_epoch_idx, batch_size, epoch_size, cb, batch_info, True
    yield (_test_epoch_idx, batch_size, epoch_size,
           SampleCb(batch_size, epoch_size), None, False)
def test_dtype_arg():
    """Check that data fed through external_source with dtype=UINT8 comes out as uint8
    and unchanged.

    Bug fix: the final ``np.array_equal`` result was previously discarded — the
    equality check never actually ran; it is now asserted.
    """
    batch_size = 2
    src_data = [
        [np.ones((120, 120, 3), dtype=np.uint8)] * batch_size
    ]
    src_pipe = Pipeline(batch_size, 1, 0)
    src_ext = fn.external_source(source=src_data, dtype=DALIDataType.UINT8)
    src_pipe.set_outputs(src_ext)
    src_pipe.build()
    out, = src_pipe.run()
    for i in range(batch_size):
        t = out.at(i)
        assert t.dtype == np.uint8
        assert np.array_equal(t, np.ones((120, 120, 3), dtype=np.uint8))
def test_dtype_arg_multioutput():
    """Check that per-output dtype declarations (list form) are honored for a
    multi-output external source."""
    batch_size = 2
    src_data = [
        [[np.ones((120, 120, 3), dtype=np.uint8)] * batch_size,
         [np.ones((120, 120, 3), dtype=np.float32)] * batch_size]
    ]
    src_pipe = Pipeline(batch_size, 1, 0)
    src_ext, src_ext2 = fn.external_source(source=src_data, num_outputs=2,
                                           dtype=[DALIDataType.UINT8, DALIDataType.FLOAT])
    src_pipe.set_outputs(src_ext, src_ext2)
    src_pipe.build()
    out1, out2 = src_pipe.run()
    for i in range(batch_size):
        t1 = out1.at(i)
        t2 = out2.at(i)
        assert t1.dtype == np.uint8
        assert np.array_equal(t1, np.ones((120, 120, 3), dtype=np.uint8))
        assert t2.dtype == np.float32
        assert np.allclose(t2, [np.ones((120, 120, 3), dtype=np.float32)])
@raises(RuntimeError, glob="ExternalSource expected data of type uint8 and got: float")
def test_incorrect_dtype_arg():
    """float32 data fed into an external source declared as UINT8 must raise."""
    batch_size = 2
    batch = [np.ones((120, 120, 3), dtype=np.float32)] * batch_size
    pipe = Pipeline(batch_size, 1, 0)
    pipe.set_outputs(fn.external_source(source=[batch], dtype=DALIDataType.UINT8))
    pipe.build()
    pipe.run()
@raises(RuntimeError, glob="Type of the data fed to the external source has changed from the "
        "previous iteration. Type in the previous iteration was float and "
        "the current type is uint8.")
def test_changing_dtype():
    """Changing the input dtype between iterations (float -> uint8) must raise."""
    batch_size = 2
    src_data = [
        [np.ones((120, 120, 3), dtype=np.float32)] * batch_size,
        [np.ones((120, 120, 3), dtype=np.uint8)] * batch_size
    ]
    src_pipe = Pipeline(batch_size, 1, 0)
    src_ext = fn.external_source(source=src_data)
    src_pipe.set_outputs(src_ext)
    src_pipe.build()
    src_pipe.run()
    src_pipe.run()  # second iteration feeds uint8 and must trigger the error
def test_ndim_arg():
    """Both ndim=3 and layout="HWC" must accept matching 3D uint8 data unchanged."""
    batch_size = 2
    batch = [np.ones((120, 120, 3), dtype=np.uint8)] * batch_size
    pipe = Pipeline(batch_size, 1, 0)
    by_ndim = fn.external_source(source=[batch], dtype=DALIDataType.UINT8, ndim=3)
    by_layout = fn.external_source(source=[batch], dtype=DALIDataType.UINT8, layout="HWC")
    pipe.set_outputs(by_ndim, by_layout)
    pipe.build()
    out1, out2 = pipe.run()
    expected = np.ones((120, 120, 3), dtype=np.uint8)
    for i in range(batch_size):
        assert np.array_equal(out1.at(i), expected)
        assert np.array_equal(out2.at(i), expected)
def test_ndim_arg_multioutput():
    """Per-output `ndim`/`layout` lists work for multi-output external sources."""
    batch_size = 2
    ref_u8 = np.ones((120, 120, 3), dtype=np.uint8)
    ref_f32 = np.ones((120, 120), dtype=np.float32)
    src_data = [[[ref_u8] * batch_size, [ref_f32] * batch_size]]
    pipe = Pipeline(batch_size, 1, 0)
    # one ES specified via ndim, the other via layout - both must accept the data
    ndim_a, ndim_b = fn.external_source(
        source=src_data, num_outputs=2,
        dtype=[DALIDataType.UINT8, DALIDataType.FLOAT],
        ndim=[3, 2])
    layout_a, layout_b = fn.external_source(
        source=src_data, num_outputs=2,
        dtype=[DALIDataType.UINT8, DALIDataType.FLOAT],
        layout=["HWC", "HW"])
    pipe.set_outputs(ndim_a, ndim_b, layout_a, layout_b)
    pipe.build()
    out11, out12, out21, out22 = pipe.run()
    for i in range(batch_size):
        assert np.array_equal(out11.at(i), ref_u8)
        assert np.allclose(out12.at(i), [ref_f32])
        assert np.array_equal(out21.at(i), ref_u8)
        assert np.allclose(out22.at(i), [ref_f32])
def test_layout_ndim_match():
    """Matching `layout` and `ndim` lists given together must be accepted."""
    batch_size = 2
    ref_3d = np.ones((120, 120, 3), dtype=np.uint8)
    ref_2d = np.ones((120, 120), dtype=np.uint8)
    src_data = [[[ref_3d] * batch_size, [ref_2d] * batch_size]]
    pipe = Pipeline(batch_size, 1, 0)
    out_a, out_b = fn.external_source(source=src_data, num_outputs=2,
                                      dtype=DALIDataType.UINT8, layout=["HWC", "HW"],
                                      ndim=[3, 2])
    pipe.set_outputs(out_a, out_b)
    pipe.build()
    out1, out2 = pipe.run()
    for i in range(batch_size):
        assert np.array_equal(out1.at(i), ref_3d)
        assert np.allclose(out2.at(i), [ref_2d])
@raises(RuntimeError, glob="Number of dimensions in the provided layout does not match the ndim "
                           "argument. The arguments provided:\n ndim = 2,\n layout: \"HWC\".")
def test_ndim_layout_mismatch():
    """A 3-char layout together with ndim=2 is contradictory and must raise at build."""
    pipe = Pipeline(1, 1, 0)
    out = fn.external_source(layout="HWC", ndim=2)
    pipe.set_outputs(out)
    pipe.build()
@raises(RuntimeError, glob="ExternalSource expected data with 3 dimensions and got 2 dimensions")
def test_ndim_data_mismatch():
    """Feeding 2D data to an output declared with ndim=3 must raise."""
    batch_size = 2
    src_data = [[[np.ones((120, 120, 3), dtype=np.uint8)] * batch_size,
                 [np.ones((120, 120), dtype=np.uint8)] * batch_size]]
    pipe = Pipeline(batch_size, 1, 0)
    out1, out2 = fn.external_source(source=src_data, num_outputs=2,
                                    dtype=DALIDataType.UINT8, ndim=3)
    pipe.set_outputs(out1, out2)
    pipe.build()
    pipe.run()
@raises(RuntimeError, glob="Number of dimensions of the data fed to the external source has "
                           "changed from previous iteration. Dimensionality in the previous "
                           "iteration was 3 and the current is 2.")
def test_ndim_changing():
    """Changing the dimensionality of the fed data between iterations must raise."""
    batch_size = 2
    batches = [
        [np.ones((120, 120, 3), dtype=np.uint8)] * batch_size,
        [np.ones((120, 120), dtype=np.uint8)] * batch_size
    ]
    pipe = Pipeline(batch_size, 1, 0)
    pipe.set_outputs(fn.external_source(source=batches, dtype=DALIDataType.UINT8))
    pipe.build()
    pipe.run()
    pipe.run()  # second batch has fewer dimensions
@raises(RuntimeError, glob="Expected data with layout: \"H\" and got: \"W\"")
def test_layout_data_mismatch():
    """Feeding data whose layout differs from the declared one must raise."""
    pipe = Pipeline(1, 1, 0, prefetch_queue_depth=1)
    declared_h = fn.external_source(name="input", layout="H")
    pipe.set_outputs(declared_h)
    pipe.build()
    mismatched = [np.zeros((1))]
    pipe.feed_input("input", mismatched, layout="W")
    pipe.run()
@raises(RuntimeError, glob="Layout of the data fed to the external source has changed from "
                           "previous iteration. Layout in the previous iteration was \"W\" "
                           "and the current is \"H\".")
def test_layout_changing():
    """Changing the layout of the fed data between iterations must raise."""
    pipe = Pipeline(1, 1, 0)
    pipe.set_outputs(fn.external_source(name="input"))
    pipe.build()
    pipe.feed_input("input", [np.zeros((1))], layout="W")
    pipe.feed_input("input", [np.zeros((1))], layout="H")
    pipe.run()
    pipe.run()
def _test_partially_utilized_external_source_warning(usage_mask, source_type):
    """Check that building a pipeline that uses only some outputs of a multi-output
    external source emits a warning listing the unused output indices.

    Args:
        usage_mask: list of bools, one per ES output; False marks an unused output.
        source_type: key selecting the source flavor (per-sample callback, batch
            callback, generator function, generator object, or iterator object).
    """
    np_rng = np.random.default_rng(12345)
    max_batch_size = 8
    num_outputs = len(usage_mask)

    def rand_batch():
        batch_size = np.int32(np_rng.uniform(1, max_batch_size + 1))
        # BUGFIX: np.random.Generator.uniform takes `size=`, not `shape=` -
        # the previous keyword raised TypeError whenever the source was invoked.
        return np.float32(np_rng.uniform(-1, 1, size=(batch_size, 10, 100, 3)))

    def rand_tuple(rand_elem):
        return tuple(rand_elem() for _ in range(num_outputs))

    def sample_cb_source(sample_info):
        def rand_sample():
            return np.float32(np_rng.uniform(-1, 1, size=(10, 100, 3)))
        return rand_tuple(rand_sample)

    def batch_cb_source():
        return rand_tuple(rand_batch)

    def gen_fun_source():
        while True:
            yield rand_tuple(rand_batch)

    class IteratorSource:
        def __iter__(self):
            return self

        def __next__(self):
            return rand_tuple(rand_batch)

    sources = {
        'sample_cb_source': sample_cb_source,
        'batch_cb_source': batch_cb_source,
        'gen_fun_source': gen_fun_source,
        'generator': gen_fun_source(),
        'IteratorSource': IteratorSource()
    }

    @pipeline_def
    def pipeline():
        outputs = fn.external_source(source=sources[source_type], num_outputs=num_outputs,
                                     batch=source_type != "sample_cb_source")
        assert len(outputs) == num_outputs
        # only the outputs marked used in usage_mask are wired into the graph
        utilized_outputs = (out for out, is_used in zip(outputs, usage_mask) if is_used)
        return tuple(fn.gaussian_blur(out, window_size=3) for out in utilized_outputs)

    pipe = pipeline(batch_size=max_batch_size, num_threads=4, device_id=0)
    unused_output_idxs = [i for i, is_used in enumerate(usage_mask) if not is_used]
    assert len(unused_output_idxs) > 0
    pruned_idx_str = ", ".join(str(idx) for idx in unused_output_idxs)
    if len(unused_output_idxs) == 1:
        pruned_str = f"output at the index {pruned_idx_str} is"
    else:
        pruned_str = f"outputs at the indices {pruned_idx_str} are"
    expected_error_msg = (
        f"The external source node '*{source_type}*' produces {num_outputs} outputs, "
        f"but the {pruned_str} not used.")
    with assert_warns(Warning, glob=expected_error_msg):
        pipe.build()
def test_partially_utilized_external_source_warning():
    """Generate cases: for 2-4 outputs, mark random subsets unused, cycling source flavors."""
    rng = random.Random(42)

    def sources():
        flavors = ('sample_cb_source', 'batch_cb_source', 'gen_fun_source', 'generator',
                   'IteratorSource')
        while True:
            yield from flavors

    source_type = sources()
    for num_outputs in (2, 3, 4):
        for num_unused in range(1, num_outputs):
            unused = rng.sample(list(range(num_outputs)), num_unused)
            usage_mask = [i not in unused for i in range(num_outputs)]
            yield _test_partially_utilized_external_source_warning, usage_mask, next(source_type)
def _test_partially_utilized_es_old_style(usage_mask):
    # check that the build time error on unused external source does not interfere
    # with external sources that are manually fed by user provided code
    """Old-style (class-based) pipeline with a partially used multi-output
    ExternalSource that is fed manually in iter_setup; must build and run."""
    num_outputs = len(usage_mask)
    batch_size = 16
    batch = np.array(list(range(batch_size * 1024))).reshape(batch_size, 16, 16, 4)

    class OldStylePipe(Pipeline):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.inp = ops.ExternalSource(num_outputs=num_outputs)
            self.gb = ops.GaussianBlur(window_size=3)

        def define_graph(self):
            # keep references so iter_setup() can feed only the utilized inputs
            self.all_inputs = self.inp()
            assert len(self.all_inputs) == num_outputs
            self.utilized_inputs = [
                inp for inp, is_used in zip(self.all_inputs, usage_mask) if is_used]
            return tuple(self.gb(inp) for inp in self.utilized_inputs)

        def iter_setup(self):
            assert len(self.utilized_inputs) == sum(usage_mask)
            for out in self.utilized_inputs:
                self.feed_input(out, batch)

    pipe = OldStylePipe(batch_size=batch_size, num_threads=4, device_id=0)
    pipe.build()
    pipe.run()
def test_partially_utilized_es_old_style():
    """Generate the usage-mask matrix for the old-style (class) pipeline check."""
    rng = random.Random(42)
    for num_outputs in (2, 3, 4):
        for num_unused in range(1, num_outputs):
            dropped = rng.sample(list(range(num_outputs)), num_unused)
            mask = [i not in dropped for i in range(num_outputs)]
            yield _test_partially_utilized_es_old_style, mask
def _test_non_utilized_external_source_pruning(num_outputs):
    """A pipeline that consumes none of the ES outputs must still build and run
    (the unused external source gets pruned from the graph)."""
    max_batch_size = 16

    def sample_cb_source(sample_info):
        return None

    @pipeline_def
    def pipeline():
        unused = fn.external_source(  # noqa F841
            source=sample_cb_source, batch=False,
            num_outputs=num_outputs)
        data = fn.random.uniform(range=(0, 255), shape=(300, 100, 3))
        img = fn.reshape(data, layout="HWC")
        return fn.gaussian_blur(img, window_size=3)

    pipe = pipeline(batch_size=max_batch_size, num_threads=4, device_id=0)
    pipe.build()
    pipe.run()
def test_non_utilized_external_source_pruning():
    # if all outputs are unused, ES should simply be pruned not preventing pipeline from operation
    yield from ((_test_non_utilized_external_source_pruning, n) for n in (None, 1, 2, 3, 4))
def test_empty_es():
    """Feeding an empty batch through an external source must raise a clear error."""
    max_batch_size = 16

    @pipeline_def
    def pipeline():
        return fn.external_source(source=lambda: [])

    # Providing an empty batch was legal, but it failed on MakeContiguous node.
    # This checks proper validation in External Source which is the only way that could provide
    # empty batch as input into DALI graph.
    with assert_raises(RuntimeError, glob="*ExternalSource expects non-empty batches*"):
        pipe = pipeline(batch_size=max_batch_size, num_threads=4, device_id=0)
        pipe.build()
        pipe.run()
def to_tensor_list_gpu(data):
    """Materialize `data` (a list of arrays) as a GPU TensorList via a one-shot pipeline."""
    @pipeline_def(batch_size=len(data), num_threads=4, device_id=0, prefetch_queue_depth=1)
    def convert_pipe():
        return fn.external_source(source=[data], device="gpu")

    pipe = convert_pipe()
    pipe.build()
    return pipe.run()[0]
def test_repeat_last():
    """With repeat_last=True the external source keeps re-serving the most recently
    fed batch until a new one is fed; CPU and GPU inputs are tracked independently.
    prefetch_queue_depth=1, so a newly fed batch is visible on the very next run()."""
    @pipeline_def
    def pipeline():
        cpu = fn.external_source(name="es_cpu", repeat_last=True)
        gpu = fn.external_source(name="es_gpu", repeat_last=True, device="gpu", no_copy=True)
        return cpu, gpu

    pipe = pipeline(batch_size=4, num_threads=4, device_id=0, prefetch_queue_depth=1)
    pipe.build()
    data1 = [
        np.array([1], dtype=np.int32),
        np.array([3], dtype=np.int32),
        np.array([42], dtype=np.int32),
        np.array([666], dtype=np.int32)
    ]
    data2 = [
        np.array([11], dtype=np.int32),
        np.array([33], dtype=np.int32),
        np.array([422], dtype=np.int32),
        np.array([6666], dtype=np.int32)
    ]
    # GPU ES uses no_copy=True, so it is fed device-resident TensorLists
    data1_gpu = to_tensor_list_gpu(data1)
    data2_gpu = to_tensor_list_gpu(data2)
    pipe.feed_input("es_cpu", data1)
    pipe.feed_input("es_gpu", data1_gpu)
    a, b = pipe.run()
    check_batch(a, data1)
    check_batch(b, data1)
    # no new feed - the previous batches are repeated
    a, b = pipe.run()
    check_batch(a, data1)
    check_batch(b, data1)
    # only the CPU input is refreshed; GPU keeps repeating data1
    pipe.feed_input("es_cpu", data2)
    a, b = pipe.run()
    check_batch(a, data2)
    check_batch(b, data1)
    # now refresh the GPU input as well
    pipe.feed_input("es_gpu", data2_gpu)
    a, b = pipe.run()
    check_batch(a, data2)
    check_batch(b, data2)
    # and switch the CPU input back, GPU keeps data2
    pipe.feed_input("es_cpu", data1)
    a, b = pipe.run()
    check_batch(a, data1)
    check_batch(b, data2)
def test_repeat_last_queue():
    """Same as test_repeat_last but with prefetch_queue_depth=2: a freshly fed batch
    becomes visible one run() later, because one iteration is already prefetched."""
    @pipeline_def
    def pipeline():
        cpu = fn.external_source(name="es_cpu", repeat_last=True)
        gpu = fn.external_source(name="es_gpu", repeat_last=True, device="gpu")
        return cpu, gpu

    pipe = pipeline(batch_size=4, num_threads=4, device_id=0, prefetch_queue_depth=2)
    pipe.build()
    data1 = [
        np.array([1], dtype=np.int32),
        np.array([3], dtype=np.int32),
        np.array([42], dtype=np.int32),
        np.array([666], dtype=np.int32)
    ]
    data2 = [
        np.array([11], dtype=np.int32),
        np.array([33], dtype=np.int32),
        np.array([422], dtype=np.int32),
        np.array([6666], dtype=np.int32)
    ]
    data3 = data1
    pipe.feed_input("es_cpu", data1)
    pipe.feed_input("es_gpu", data1)
    a, b = pipe.run()
    check_batch(a, data1)
    check_batch(b, data1)
    a, b = pipe.run()
    check_batch(a, data1)
    check_batch(b, data1)
    pipe.feed_input("es_cpu", data2)
    a, b = pipe.run()
    check_batch(a, data1)  # <- still the old value
    check_batch(b, data1)
    pipe.feed_input("es_gpu", data3)
    a, b = pipe.run()
    check_batch(a, data2)  # <- new value visible
    check_batch(b, data1)  # <- still old
    pipe.feed_input("es_cpu", data3)
    a, b = pipe.run()
    check_batch(a, data2)  # <- still 2, the most recent change not visible
    check_batch(b, data3)  # <- new
def _check_repeat_last_var_batch(device):
    """repeat_last with a varying batch size: the other pipeline outputs must follow
    the ES batch size, with the one-iteration delay of prefetch_queue_depth=2."""
    @pipeline_def
    def pipeline():
        es = fn.external_source(name="es", repeat_last=True, device=device)
        u = fn.random.uniform(range=(0, 0.01))  # batch size should track `es`
        return es, u

    pipe = pipeline(batch_size=4, num_threads=4, device_id=0, prefetch_queue_depth=2)
    pipe.build()
    data1 = [
        np.array([1], dtype=np.int32),
        np.array([3], dtype=np.int32),
        np.array([42], dtype=np.int32),
        np.array([666], dtype=np.int32)
    ]
    # deliberately smaller batch than data1
    data2 = [
        np.array([11], dtype=np.int32),
        np.array([33], dtype=np.int32),
        np.array([422], dtype=np.int32),
    ]
    pipe.feed_input("es", data1)
    a, b = pipe.run()
    check_batch(a, data1)
    assert len(b) == len(data1)
    a, b = pipe.run()
    check_batch(a, data1)
    assert len(b) == len(data1)
    pipe.feed_input("es", data2)
    a, b = pipe.run()
    check_batch(a, data1)  # <- still the old value
    assert len(b) == len(data1)
    a, b = pipe.run()  # <- new value visible
    check_batch(a, data2)
    assert len(b) == len(data2)
    pipe.feed_input("es", data1)
    a, b = pipe.run()
    check_batch(a, data2)  # <- still 2, the most recent change not visible
    assert len(b) == len(data2)
    a, b = pipe.run()  # <- new value visible
    check_batch(a, data1)
    assert len(b) == len(data1)
def test_repeat_last_var_batch():
    """Run the repeat_last variable-batch check on both backends."""
    yield from ((_check_repeat_last_var_batch, dev) for dev in ['cpu', 'gpu'])
def _check_blocking(device):
    """With blocking=True, run() waits for feed_input instead of failing when no
    data is queued; verifies the fed data round-trips and that teardown works."""
    batch_size = 5
    prefetch_queue_depth = 10

    @pipeline_def
    def test_pipeline():
        data = fn.external_source(dtype=types.INT32, name="test_source", blocking=True,
                                  device=device)
        return data

    rng = default_rng()
    # NOTE(review): random() yields floats in [0, 1), so astype(np.int32) truncates
    # the whole array to zeros - the round-trip check still holds, but the data is
    # degenerate; confirm whether non-zero data was intended.
    data_to_feed = rng.random(size=(batch_size, 4, 6, 2)).astype(dtype=np.int32)
    pipe = test_pipeline(batch_size=batch_size, num_threads=2, device_id=0, seed=12,
                         prefetch_queue_depth=prefetch_queue_depth)
    pipe.build()
    pipe.feed_input("test_source", data_to_feed)
    for _ in range(5):
        out = pipe.run()[0].as_tensor()
        if device == "gpu":
            out = out.as_cpu()
        assert np.all(np.equal(np.array(out), data_to_feed))
        data_to_feed = rng.random(size=(batch_size, 4, 6, 2)).astype(dtype=np.int32)
        pipe.feed_input("test_source", data_to_feed)
    # make sure that the pipeline is not waiting for data preventing it from being deleted
    for _ in range(prefetch_queue_depth):
        pipe.feed_input("test_source", data_to_feed)
def test_blocking():
    """Run the blocking external-source check on both backends."""
    yield from ((_check_blocking, dev) for dev in ['cpu', 'gpu'])
def _blocking_destructor(device):
    """Feed a single batch to a blocking external source with a deep prefetch queue;
    the first run() (and subsequent pipeline destruction) must not hang."""
    batch_size = 5
    prefetch_queue_depth = 5

    @pipeline_def
    def test_pipeline():
        data = fn.external_source(dtype=types.INT32, name="test_source", blocking=True,
                                  device=device)
        return data

    rng = default_rng()
    data_to_feed = rng.random(size=(batch_size, 4, 6, 2)).astype(dtype=np.int32)
    pipe = test_pipeline(batch_size=batch_size, num_threads=2, device_id=0, seed=12,
                         prefetch_queue_depth=prefetch_queue_depth)
    pipe.build()
    # feed one input to pipeline can return something
    pipe.feed_input("test_source", data_to_feed)
    # should not hang
    _ = pipe.run()
def test_blocking_destructor():
    """Run the blocking-destructor check on both backends."""
    yield from ((_blocking_destructor, dev) for dev in ['cpu', 'gpu'])
|
DALI-main
|
dali/test/python/test_external_source_dali.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import platform
import random
import cv2
from nvidia.dali.pipeline import pipeline_def
from nvidia.dali import fn, types
from test_utils import get_dali_extra_path, get_arch
from nose_utils import raises, assert_raises
from nose import SkipTest
# Location of the DALI_extra test assets; images_dir holds the image-processing inputs.
test_data_root = get_dali_extra_path()
images_dir = os.path.join(test_data_root, 'db', 'imgproc')
# Cached result of is_of_supported() - computed once per process.
is_of_supported_var = None


def is_of_supported(device_id=0):
    """Return True if the HW Optical Flow is usable on this machine (cached).

    Requires compute capability >= 7.5; on aarch64 additionally requires a driver
    older than R495 (known OF driver issue on that platform).
    """
    global is_of_supported_var
    if is_of_supported_var is not None:
        return is_of_supported_var

    driver_version_major = 0
    try:
        import pynvml
        pynvml.nvmlInit()
        # NOTE(review): decode() assumes the NVML binding returns bytes; newer
        # pynvml versions return str - confirm the pinned pynvml version.
        driver_version = pynvml.nvmlSystemGetDriverVersion().decode('utf-8')
        driver_version_major = int(driver_version.split('.')[0])
    except ModuleNotFoundError:
        print("NVML not found")

    # there is an issue with OpticalFlow driver in R495 and newer on aarch64 platform
    is_of_supported_var = get_arch(device_id) >= 7.5 and (
        platform.machine() == "x86_64" or driver_version_major < 495)
    return is_of_supported_var
def get_mapping(shape):
    """Build a synthetic swirl-like displacement field for an image of `shape` (h, w).

    Returns:
        (xy, ofs): pixel-center coordinates [h, w, 2] (x, y order) and the
        per-pixel displacement field [h, w, 2].
    """
    h, w = shape
    cols = np.arange(w, dtype=np.float32) + 0.5
    rows = np.arange(h, dtype=np.float32) + 0.5
    xy = np.transpose([np.tile(cols, h), np.repeat(rows, w)]).reshape([h, w, 2])
    d = xy - np.array([[[w * 0.5, h * 0.5]]])
    dist = np.linalg.norm(d, ord=2, axis=2)
    diag = np.sqrt(w * w + h * h)
    # ring-shaped magnitude: difference of two Gaussians of the radial distance
    mag = np.exp(-(dist * (7 / diag)) ** 2) - np.exp(-(dist * (9 / diag)) ** 2)
    rotated = d + 0
    rotated[:, :, 0] = d[:, :, 0] * (1 - mag) + d[:, :, 1] * mag
    rotated[:, :, 1] = d[:, :, 1] * (1 - mag) + d[:, :, 0] * mag
    return xy, rotated - d
def load_frames(sample_info=types.SampleInfo(0, 0, 0, 0), hint_grid=None):
    """Load the test image and build a 2-frame sequence: the image plus a warped copy.

    Every other sample (odd idx_in_epoch) is downscaled to exercise variable input
    resolutions. If hint_grid is not None, a zero hint tensor of the same shape is
    appended and a list [frames, hints] is returned instead of a single array.
    """
    img = cv2.imread(os.path.join(images_dir, 'alley.png'))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if sample_info.idx_in_epoch % 2:
        # NOTE(review): cv2.resize dsize is (width, height), but (shape[0]//2, shape[1]//2)
        # is (height//2, width//2) - axes are swapped for non-square images; confirm intended.
        img = cv2.resize(
            img, dsize=(img.shape[0] // 2, img.shape[1] // 2),
            interpolation=cv2.INTER_AREA)
    xy, ofs = get_mapping(img.shape[:2])
    # warp the image by the synthetic displacement field; this is the ground-truth
    # flow the optical flow operator should recover
    remap = (xy + ofs - np.array([[[0.5, 0.5]]])).astype(np.float32)
    warped = cv2.remap(img, remap, None, interpolation=cv2.INTER_LINEAR)
    result = np.array([img, warped])
    if hint_grid is not None:
        result = [result]
        result.append(np.zeros(shape=result[0].shape, dtype=np.uint8))
    return result
@pipeline_def(batch_size=1, seed=16)
def of_pipeline(output_grid=1, hint_grid=1, use_temporal_hints=False):
    """DALI pipeline: loads a (frame, warped-frame) sequence and runs optical flow.

    When hint_grid is not None the external source also produces a zero hint tensor
    that is passed to the optical flow operator as external hints.
    """
    if hint_grid is not None:
        seq, hint = fn.external_source(
            lambda info: load_frames(info, hint_grid),
            layout=["FHWC", "FHWC"], batch=False, num_outputs=2)
        of = fn.optical_flow(
            seq.gpu(), hint.gpu(), device="gpu", output_grid=output_grid,
            hint_grid=hint_grid, enable_temporal_hints=use_temporal_hints)
    else:
        seq = fn.external_source(
            lambda info: load_frames(info, hint_grid),
            layout="FHWC", batch=False)
        of = fn.optical_flow(
            seq.gpu(), device="gpu", output_grid=output_grid,
            enable_temporal_hints=use_temporal_hints)
    return seq, of
def make_colorwheel():
    """Generate the 55-entry optical-flow color wheel as a (55, 3) float array.

    As presented in Baker et al., "A Database and Evaluation Methodology for
    Optical Flow" (ICCV, 2007), http://vision.middlebury.edu/flow/flowEval-iccv07.pdf.
    According to the C++ source code of Daniel Scharstein
    According to the Matlab source code of Deqing Sun
    """
    RY, YG, GC, CB, BM, MR = 15, 6, 4, 11, 13, 6
    ncols = RY + YG + GC + CB + BM + MR
    wheel = np.zeros((ncols, 3))
    col = 0
    # each segment holds one channel at 255 while ramping another up or down
    segments = ((RY, 0, 1, False), (YG, 1, 0, True), (GC, 1, 2, False),
                (CB, 2, 1, True), (BM, 2, 0, False), (MR, 0, 2, True))
    for count, const_ch, ramp_ch, ramp_down in segments:
        ramp = np.floor(255 * np.arange(0, count) / count)
        wheel[col:col + count, const_ch] = 255
        wheel[col:col + count, ramp_ch] = 255 - ramp if ramp_down else ramp
        col += count
    return wheel
def flow_compute_color(u, v, convert_to_bgr=False):
    '''
    Applies the flow color wheel to (possibly clipped) flow components u and v.
    According to the C++ source code of Daniel Scharstein
    According to the Matlab source code of Deqing Sun
    :param u: np.ndarray, input horizontal flow
    :param v: np.ndarray, input vertical flow
    :param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB
    :return: np.uint8 image of shape [H, W, 3]
    '''
    flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
    colorwheel = make_colorwheel()  # shape [55x3]
    ncols = colorwheel.shape[0]
    rad = np.sqrt(np.square(u) + np.square(v))
    # map the flow angle to a fractional position on the color wheel
    a = np.arctan2(-v, -u) / np.pi
    fk = (a + 1) / 2 * (ncols - 1) + 1
    k0 = np.floor(fk).astype(np.int32)
    k1 = k0 + 1
    k1[k1 == ncols] = 1  # wrap around the wheel
    f = fk - k0  # interpolation weight between the two wheel entries
    # NOTE(review): if a == 1 exactly, fk == ncols and k0 indexes past the wheel;
    # inputs are assumed to avoid this edge case - confirm.
    for i in range(colorwheel.shape[1]):
        tmp = colorwheel[:, i]
        col0 = tmp[k0] / 255.0
        col1 = tmp[k1] / 255.0
        col = (1 - f) * col0 + f * col1
        idx = (rad <= 1)
        # desaturate towards white for small magnitudes
        col[idx] = 1 - rad[idx] * (1 - col[idx])
        col[~idx] = col[~idx] * 0.75  # out of range?
        # Note the 2-i => BGR instead of RGB
        ch_idx = 2 - i if convert_to_bgr else i
        flow_image[:, :, ch_idx] = np.floor(255 * col)
    return flow_image
def flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False):
    """Convert a dense optical-flow field of shape [H, W, 2] to a color image.

    According to the C++ source code of Daniel Scharstein
    According to the Matlab source code of Deqing Sun

    :param flow_uv: np.ndarray of shape [H, W, 2]
    :param clip_flow: float, maximum clipping value for flow
    :param convert_to_bgr: bool, whether to output BGR instead of RGB
    :return: np.uint8 image of shape [H, W, 3]
    """
    assert flow_uv.ndim == 3, 'input flow must have three dimensions'
    assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
    if clip_flow is not None:
        flow_uv = np.clip(flow_uv, 0, clip_flow)
    u, v = flow_uv[:, :, 0], flow_uv[:, :, 1]
    # normalize by the largest magnitude so the colors span the whole wheel
    rad_max = np.max(np.sqrt(np.square(u) + np.square(v)))
    scale = rad_max + 1e-5
    return flow_compute_color(u / scale, v / scale, convert_to_bgr)
interactive = False
def check_optflow(output_grid=1, hint_grid=1, use_temporal_hints=False):
    """Run the optical-flow pipeline and compare the computed field against the
    synthetic reference displacement from get_mapping().

    On pre-Ampere (arch < 8) only some grid sizes are supported; unsupported
    configurations are expected to raise, and the test is then skipped.
    """
    batch_size = 3
    pipe = of_pipeline(batch_size=batch_size, num_threads=3, device_id=0, output_grid=output_grid,
                       hint_grid=hint_grid, use_temporal_hints=use_temporal_hints)
    pipe.build()
    if get_arch() < 8:
        if output_grid != 4:
            assert_raises(RuntimeError, pipe.run,
                          glob="grid size: * is not supported, supported are:")
            raise SkipTest('Skipped as grid size is not supported for this arch')
        elif hint_grid not in [4, 8, None]:
            assert_raises(RuntimeError, pipe.run,
                          glob="hint grid size: * is not supported, supported are:")
            raise SkipTest('Skipped as hint grid size is not supported for this arch')
    for _ in range(2):
        out = pipe.run()
        for i in range(batch_size):
            seq = out[0].at(i)
            out_field = out[1].as_cpu().at(i)[0]
            # reference displacement for this sample's resolution, resized to the
            # (possibly coarser) output grid of the optical-flow operator
            _, ref_field = get_mapping(seq.shape[1:3])
            dsize = (out_field.shape[1], out_field.shape[0])
            ref_field = cv2.resize(ref_field, dsize=dsize, interpolation=cv2.INTER_AREA)
            if interactive:
                cv2.imshow("out", flow_to_color(out_field, None, True))
                cv2.imshow("ref", flow_to_color(ref_field, None, True))
                print(np.max(out_field))
                print(np.max(ref_field))
                cv2.imshow("dif", flow_to_color(ref_field - out_field, None, True))
                cv2.waitKey(0)
            # per-pixel Euclidean error between reference and computed flow
            err = np.linalg.norm(ref_field - out_field, ord=2, axis=2)
            assert np.mean(err) < 1  # average error of less than one pixel
            assert np.max(err) < 100  # no point more than 100px off
            assert np.sum(err > 1) / np.prod(err.shape) < 0.1  # 90% are within 1px
            assert np.sum(err > 2) / np.prod(err.shape) < 0.05  # 95% are within 2px
def test_optflow():
    """Generate optical-flow checks over output grids, random hint grids and hint modes."""
    if not is_of_supported():
        raise SkipTest('Optical Flow is not supported on this platform')
    for output_grid in (1, 2, 4):
        hint_grid = random.choice([None, 1, 2, 4, 8])
        for use_temporal_hints in (True, False):
            yield check_optflow, output_grid, hint_grid, use_temporal_hints
@raises(RuntimeError, "Output grid size: 3 is not supported, supported are:")
def test_wrong_out_grid_size():
    """An unsupported output_grid value must be rejected at run time."""
    pipe = of_pipeline(num_threads=3, device_id=0, output_grid=3)
    pipe.build()
    pipe.run()
@raises(RuntimeError, "Hint grid size: 3 is not supported, supported are:")
def test_wrong_hint_grid_size():
    """An unsupported hint_grid value must be rejected at run time."""
    pipe = of_pipeline(num_threads=3, device_id=0, output_grid=4, hint_grid=3)
    pipe.build()
    pipe.run()
|
DALI-main
|
dali/test/python/test_optical_flow.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import random as random
import tensorflow as tf
from nose.tools import with_setup
from nose_utils import raises
from test_dali_tf_dataset_pipelines import FixedSampleIterator, external_source_tester, \
external_source_converter_with_fixed_value, external_source_converter_with_callback, \
RandomSampleIterator, external_source_converter_multiple, get_min_shape_helper, \
external_source_tester_multiple
from test_dali_tf_es_pipelines import external_source_to_tf_dataset, \
gen_tf_with_dali_external_source, get_external_source_pipe
from test_utils_tensorflow import run_tf_dataset_graph, skip_inputs_for_incompatible_tf, \
run_dataset_in_graph, run_tf_dataset_multigpu_graph_manual_placement, \
get_dali_dataset_from_pipeline, get_image_pipeline
tf.compat.v1.disable_eager_execution()
def test_tf_dataset_gpu():
    """Smoke test: run the DALI TF dataset in graph mode on GPU."""
    run_tf_dataset_graph('gpu')
def test_tf_dataset_cpu():
    """Smoke test: run the DALI TF dataset in graph mode on CPU."""
    run_tf_dataset_graph('cpu')
def run_tf_dataset_with_constant_input(dev, shape, value, dtype, batch):
    """Run the TF dataset graph check with a constant tensor fed through external source."""
    tensor = np.full(shape, value, dtype)
    pipe_desc = external_source_tester(shape, dtype, FixedSampleIterator(tensor), batch=batch)
    dataset_fn = external_source_converter_with_fixed_value(shape, dtype, tensor, batch)
    run_tf_dataset_graph(dev, get_pipeline_desc=pipe_desc, to_dataset=dataset_fn)
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_with_constant_input():
    """Constant-input matrix over devices, shapes, dtypes and batch modes."""
    shapes = [(7, 42), (64, 64, 3), (3, 40, 40, 4)]
    dtypes = [np.uint8, np.int32, np.float32]
    batch_modes = ["dataset", True, False, None]
    for dev, shape, dtype, batch in itertools.product(['cpu', 'gpu'], shapes, dtypes,
                                                      batch_modes):
        value = random.choice([42, 255])
        yield run_tf_dataset_with_constant_input, dev, shape, value, dtype, batch
def run_tf_dataset_with_random_input(dev, max_shape, dtype, batch):
    """Run the TF dataset graph check with randomly generated variable-shape input."""
    min_shape = get_min_shape_helper(batch, max_shape)
    sample_iter = RandomSampleIterator(max_shape, dtype(0), min_shape=min_shape)
    pipe_desc = external_source_tester(max_shape, dtype, sample_iter, batch=batch)
    dataset_fn = external_source_converter_with_callback(RandomSampleIterator, max_shape, dtype,
                                                         0, 1e10, min_shape, batch=batch)
    run_tf_dataset_graph(dev, get_pipeline_desc=pipe_desc, to_dataset=dataset_fn)
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_with_random_input():
    """Random-input matrix over devices, shapes, dtypes and batch modes."""
    shapes = [(10, 20), (120, 120, 3), (3, 40, 40, 4)]
    dtypes = [np.uint8, np.int32, np.float32]
    for dev, max_shape, dtype, batch in itertools.product(
            ['cpu', 'gpu'], shapes, dtypes, ["dataset", True, False, None]):
        yield run_tf_dataset_with_random_input, dev, max_shape, dtype, batch
# Run with everything on GPU (External Source op as well)
def run_tf_dataset_with_random_input_gpu(max_shape, dtype, batch):
    """Random-input check with both the dataset and the external source placed on GPU."""
    min_shape = get_min_shape_helper(batch, max_shape)
    sample_iter = RandomSampleIterator(max_shape, dtype(0), min_shape=min_shape)
    pipe_desc = external_source_tester(max_shape, dtype, sample_iter, "gpu", batch=batch)
    dataset_fn = external_source_converter_with_callback(RandomSampleIterator, max_shape, dtype,
                                                         0, 1e10, min_shape, batch=batch)
    run_tf_dataset_graph("gpu", get_pipeline_desc=pipe_desc, to_dataset=dataset_fn)
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_with_random_input_gpu():
    """Test matrix for the fully GPU-placed variant."""
    shapes = [(10, 20), (120, 120, 3), (3, 40, 40, 4)]
    dtypes = [np.uint8, np.int32, np.float32]
    for max_shape, dtype, batch in itertools.product(shapes, dtypes,
                                                     ["dataset", True, False, None]):
        yield run_tf_dataset_with_random_input_gpu, max_shape, dtype, batch
def run_tf_dataset_no_copy(max_shape, dtype, dataset_dev, es_dev, no_copy):
    """Run the graph check with an explicit no_copy setting on the external source."""
    pipe_desc = external_source_tester(
        max_shape, dtype, RandomSampleIterator(max_shape, dtype(0)), es_dev, no_copy)
    dataset_fn = external_source_converter_with_callback(RandomSampleIterator, max_shape, dtype)
    run_tf_dataset_graph(dataset_dev, get_pipeline_desc=pipe_desc, to_dataset=dataset_fn)
# Check if setting no_copy flags in all placement scenarios is ok as we override it internally
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_with_no_copy():
    """Matrix of shapes x dataset/ES placements x no_copy settings."""
    for max_shape, dataset_dev, es_dev, no_copy in itertools.product(
            [(10, 20), (120, 120, 3)], ["cpu", "gpu"], ["cpu", "gpu"], [True, False, None]):
        if dataset_dev == "cpu" and es_dev == "gpu":
            continue  # GPU op in CPU dataset not supported
        yield run_tf_dataset_no_copy, max_shape, np.uint8, dataset_dev, es_dev, no_copy
def run_tf_dataset_with_stop_iter(dev, max_shape, dtype, stop_samples):
    """Run the graph check with a finite source that raises StopIteration after
    `stop_samples` samples."""
    sample_iter = RandomSampleIterator(max_shape, dtype(0), start=0, stop=stop_samples)
    pipe_desc = external_source_tester(max_shape, dtype, sample_iter)
    dataset_fn = external_source_converter_with_callback(
        RandomSampleIterator, max_shape, dtype, 0, stop_samples)
    run_tf_dataset_graph(dev, to_stop_iter=True, get_pipeline_desc=pipe_desc,
                         to_dataset=dataset_fn)
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_with_stop_iter():
    """Finite-source matrix; stop points deliberately fall short of a full batch."""
    batch_size = 12
    for dev, max_shape, dtype, iters in itertools.product(
            ['cpu', 'gpu'], [(10, 20), (120, 120, 3), (3, 40, 40, 4)],
            [np.uint8, np.int32, np.float32], [1, 2, 3, 4, 5]):
        yield run_tf_dataset_with_stop_iter, dev, max_shape, dtype, iters * batch_size - 3
def run_tf_dataset_multi_input(dev, start_values, input_names, batches):
    """Run the graph check with several external-source inputs at once."""
    pipe_desc = external_source_tester_multiple(start_values, input_names, batches)
    dataset_fn = external_source_converter_multiple(start_values, input_names, batches)
    run_tf_dataset_graph(dev, get_pipeline_desc=pipe_desc, to_dataset=dataset_fn)
# Per-test-case sets of initial tensors fed through the multi-input pipelines;
# each inner list describes one pipeline configuration (1-3 inputs of mixed dtypes).
start_values = [
    [np.full((2, 4), -42, dtype=np.int64), np.full((3, 5), -123.0, dtype=np.float32)],
    [np.full((3, 5), -3.14, dtype=np.float32)],
    [np.full((2, 4), -42, dtype=np.int64), np.full((3, 5), -666.0, dtype=np.float32),
     np.full((1, 7), 5, dtype=np.int8)]
]
# Matching external-source input names ("input_0", "input_1", ...) per configuration.
input_names = [["input_{}".format(i) for i, _ in enumerate(vals)] for vals in start_values]
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_multi_input():
    """Exercise pipelines with several external-source inputs, mixing per-input batch modes."""
    for dev in ['cpu', 'gpu']:
        for starts, names in zip(start_values, input_names):
            # NOTE(review): both "for _ in input_names" and repeat=len(input_names) use the
            # outer list (always 3 entries) rather than `names` (1-3 entries) - confirm the
            # length mismatch is handled (e.g. zip-truncated) downstream.
            yield run_tf_dataset_multi_input, dev, starts, names, ["dataset" for _ in input_names]
            for batches in list(itertools.product([True, False], repeat=len(input_names))):
                yield run_tf_dataset_multi_input, dev, starts, names, batches
def run_tf_with_dali_external_source(dev, es_args, ed_dev, dtype, *_):
    """Wrap a DALI external-source pipeline as a TF dataset and run it to exhaustion."""
    pipe_desc = get_external_source_pipe(es_args, dtype, ed_dev)
    run_tf_dataset_graph(dev, get_pipeline_desc=pipe_desc,
                         to_dataset=external_source_to_tf_dataset, to_stop_iter=True)
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_with_dali_external_source():
    """Generate the external-source test matrix using the graph-mode runner."""
    yield from gen_tf_with_dali_external_source(run_tf_with_dali_external_source)
# Regex for the error raised when TF device placement does not match the DALI device.
tf_dataset_wrong_placement_error_msg = r"TF device and DALI device mismatch. " \
                                       r"TF device: [\w]*, DALI device: [\w]* for output"
@raises(Exception, regex=tf_dataset_wrong_placement_error_msg)
def test_tf_dataset_wrong_placement_cpu():
    """A CPU DALI pipeline wrapped in a dataset placed on GPU must be rejected."""
    batch_size, num_threads, iterations = 12, 4, 10
    pipeline = get_image_pipeline(batch_size, num_threads, 'cpu', 0)
    with tf.device('/gpu:0'):
        dataset = get_dali_dataset_from_pipeline(pipeline, 'gpu', 0)
        run_dataset_in_graph(dataset, iterations)
@raises(Exception, regex=tf_dataset_wrong_placement_error_msg)
def test_tf_dataset_wrong_placement_gpu():
    """A GPU DALI pipeline wrapped in a dataset placed on CPU must be rejected."""
    batch_size, num_threads, iterations = 12, 4, 10
    pipeline = get_image_pipeline(batch_size, num_threads, 'gpu', 0)
    with tf.device('/cpu:0'):
        dataset = get_dali_dataset_from_pipeline(pipeline, 'cpu', 0)
        run_dataset_in_graph(dataset, iterations)
# This test should be private (name starts with _) as it is called separately in L1
def _test_tf_dataset_other_gpu():
    """Run the TF dataset graph test on device_id=1 (requires a second GPU)."""
    run_tf_dataset_graph('gpu', 1)
# This test should be private (name starts with _) as it is called separately in L1
def _test_tf_dataset_multigpu_manual_placement():
    """Run the multi-GPU graph-mode test with manual device placement."""
    run_tf_dataset_multigpu_graph_manual_placement()
|
DALI-main
|
dali/test/python/test_dali_tf_dataset_graph.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali.ops as ops
import os
from nvidia.dali.pipeline import Pipeline
from test_utils import get_dali_extra_path
from nose_utils import assert_raises
# Shared test configuration: DALI_extra asset root and iterator epoch/batch sizes.
DALI_EXTRA_PATH = get_dali_extra_path()
EPOCH_SIZE = 32
BATCH_SIZE = 1
class DetectionPipeline(Pipeline):
    """Minimal detection pipeline: COCO reader emitting (images, boxes, labels).

    Boxes are moved to the GPU so the framework iterators are exercised with
    mixed CPU/GPU outputs; images and labels stay on the CPU.
    """

    def __init__(self, batch_size, device_id, file_root, annotations_file):
        # Positional args to Pipeline: batch_size, num_threads=2, device_id,
        # then True and 12 — presumably pipelined-execution flag and seed;
        # TODO confirm against Pipeline.__init__'s signature.
        super().__init__(batch_size, 2, device_id, True, 12)

        # Reading COCO dataset
        self.input = ops.readers.COCO(
            file_root=file_root,
            annotations_file=annotations_file,
            shard_id=device_id,
            num_shards=1,
            ratio=True,   # box coordinates relative to image size
            ltrb=True)    # boxes as [left, top, right, bottom]

    def define_graph(self):
        inputs, boxes, labels = self.input(name="Reader")
        return inputs, boxes.gpu(), labels
def data_paths():
    """Return (file_root, annotations_file) for the bundled COCO test set."""
    coco_root = os.path.join(DALI_EXTRA_PATH, 'db', 'coco')
    return os.path.join(coco_root, 'images'), os.path.join(coco_root, 'instances.json')
##############
# Unit tests #
##############
def test_mxnet_pipeline_dynamic_shape():
    """MXNet iterator over a detection pipeline with dynamic_shape enabled."""
    from nvidia.dali.plugin.mxnet import DALIGenericIterator as MXNetIterator
    root, annotations = data_paths()
    pipeline = DetectionPipeline(BATCH_SIZE, 0, root, annotations)
    train_loader = MXNetIterator([pipeline], [('data', MXNetIterator.DATA_TAG),
                                              ('bboxes', MXNetIterator.LABEL_TAG),
                                              ('label', MXNetIterator.LABEL_TAG)],
                                 EPOCH_SIZE, auto_reset=False, dynamic_shape=True)
    # Draining the iterator is the test: it must survive per-iteration shape changes.
    for data in train_loader:
        assert data is not None


def test_pytorch_pipeline_dynamic_shape():
    """PyTorch iterator over a detection pipeline with dynamic_shape enabled."""
    from nvidia.dali.plugin.pytorch import DALIGenericIterator as PyTorchIterator
    root, annotations = data_paths()
    pipeline = DetectionPipeline(BATCH_SIZE, 0, root, annotations)
    train_loader = PyTorchIterator([pipeline], ['data', 'bboxes', 'label'], EPOCH_SIZE,
                                   auto_reset=False, dynamic_shape=True)
    for data in train_loader:
        assert data is not None


def test_paddle_pipeline_dynamic_shape():
    """Paddle iterator over a detection pipeline with dynamic_shape enabled."""
    from nvidia.dali.plugin.paddle import DALIGenericIterator as PaddleIterator
    root, annotations = data_paths()
    pipeline = DetectionPipeline(BATCH_SIZE, 0, root, annotations)
    train_loader = PaddleIterator([pipeline], ['data', 'bboxes', 'label'],
                                  EPOCH_SIZE, auto_reset=False,
                                  dynamic_shape=True)
    for data in train_loader:
        assert data is not None


def test_api_fw_check1_pytorch():
    # PyTorch flavour of the iterator-first API-mixing check.
    from nvidia.dali.plugin.pytorch import DALIGenericIterator as PyTorchIterator
    yield from test_api_fw_check1(PyTorchIterator, ['data', 'bboxes', 'label'])


def test_api_fw_check1_mxnet():
    # MXNet flavour of the iterator-first API-mixing check.
    from nvidia.dali.plugin.mxnet import DALIGenericIterator as MXNetIterator
    yield from test_api_fw_check1(MXNetIterator, [('data', MXNetIterator.DATA_TAG),
                                                  ('bboxes', MXNetIterator.LABEL_TAG),
                                                  ('label', MXNetIterator.LABEL_TAG)])


def test_api_fw_check1_paddle():
    # Paddle flavour of the iterator-first API-mixing check.
    from nvidia.dali.plugin.paddle import DALIGenericIterator as PaddleIterator
    yield from test_api_fw_check1(PaddleIterator, ['data', 'bboxes', 'label'])


def test_api_fw_check1(iter_type, data_definition):
    """Once an iterator touches the pipeline, raw pipeline-API calls must raise.

    Generator test: yields a trailing no-op `check` case so that each framework
    shows up under its own name in the nose output.
    """
    root, annotations = data_paths()
    pipe = DetectionPipeline(BATCH_SIZE, 0, root, annotations)
    train_loader = iter_type([pipe], data_definition, EPOCH_SIZE, auto_reset=False,
                             dynamic_shape=True)
    train_loader.__next__()
    # The pipeline is now locked to the ITERATOR API type.
    for method in [pipe.schedule_run, pipe.share_outputs, pipe.release_outputs, pipe.outputs,
                   pipe.run]:
        with assert_raises(
                RuntimeError,
                glob="Mixing pipeline API type. Currently used: PipelineAPIType.ITERATOR,"
                     " but trying to use PipelineAPIType.*"):
            method()
    # disable check
    pipe.enable_api_check(False)
    # With the guard disabled the very same calls must succeed.
    for method in [pipe.schedule_run, pipe.share_outputs, pipe.release_outputs, pipe.outputs,
                   pipe.run]:
        try:
            method()
        except RuntimeError:
            assert False
    yield check, iter_type


def test_api_fw_check2_mxnet():
    # MXNet flavour of the raw-API-first API-mixing check.
    from nvidia.dali.plugin.mxnet import DALIGenericIterator as MXNetIterator
    yield from test_api_fw_check2(MXNetIterator, [('data', MXNetIterator.DATA_TAG),
                                                  ('bboxes', MXNetIterator.LABEL_TAG),
                                                  ('label', MXNetIterator.LABEL_TAG)])


def test_api_fw_check2_pytorch():
    # PyTorch flavour of the raw-API-first API-mixing check.
    from nvidia.dali.plugin.pytorch import DALIGenericIterator as PyTorchIterator
    yield from test_api_fw_check2(PyTorchIterator, ['data', 'bboxes', 'label'])


def test_api_fw_check2_paddle():
    # Paddle flavour of the raw-API-first API-mixing check.
    from nvidia.dali.plugin.paddle import DALIGenericIterator as PaddleIterator
    yield from test_api_fw_check2(PaddleIterator, ['data', 'bboxes', 'label'])


def test_api_fw_check2(iter_type, data_definition):
    """Once the scheduled (raw) API is used, creating an iterator must raise."""
    root, annotations = data_paths()
    pipe = DetectionPipeline(BATCH_SIZE, 0, root, annotations)
    pipe.build()
    # Lock the pipeline to the SCHEDULED API type by driving it manually.
    pipe.schedule_run()
    pipe.share_outputs()
    pipe.release_outputs()
    pipe.schedule_run()
    pipe.outputs()
    with assert_raises(
            RuntimeError,
            glob=("Mixing pipeline API type. Currently used: PipelineAPIType.SCHEDULED,"
                  " but trying to use PipelineAPIType.ITERATOR")):
        train_loader = iter_type([pipe], data_definition, EPOCH_SIZE, auto_reset=False,
                                 dynamic_shape=True)
        train_loader.__next__()
    # disable check
    pipe.enable_api_check(False)
    # With the guard disabled, iterator creation and iteration must succeed.
    try:
        train_loader = iter_type([pipe], data_definition, EPOCH_SIZE, auto_reset=False,
                                 dynamic_shape=True)
        train_loader.__next__()
        assert True
    except RuntimeError:
        assert False
    yield check, iter_type


def check(iter_type):
    # No-op case yielded by the generator tests above; only there so nose
    # reports one named result per framework.
    pass
|
DALI-main
|
dali/test/python/test_fw_iterators_detection.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from nose.tools import with_setup
import test_dali_tf_dataset_mnist as mnist
from test_utils_tensorflow import skip_for_incompatible_tf, available_gpus
from nose_utils import raises
from nose import SkipTest
from distutils.version import LooseVersion
tf.compat.v1.enable_eager_execution()
def test_keras_single_gpu():
    """Keras MNIST training with the DALI dataset placed on GPU 0."""
    mnist.run_keras_single_device('gpu', 0)


def test_keras_single_other_gpu():
    """Keras MNIST training on a secondary GPU (device_id=1)."""
    mnist.run_keras_single_device('gpu', 1)


def test_keras_single_cpu():
    """Keras MNIST training entirely on the CPU."""
    mnist.run_keras_single_device('cpu', 0)


@with_setup(skip_for_incompatible_tf)
@raises(Exception, "TF device and DALI device mismatch")
def test_keras_wrong_placement_gpu():
    """A GPU-backed DALI dataset fed to a CPU-placed model must fail."""
    with tf.device('cpu:0'):
        model = mnist.keras_model()
        train_dataset = mnist.get_dataset('gpu', 0)

    model.fit(
        train_dataset,
        epochs=mnist.EPOCHS,
        steps_per_epoch=mnist.ITERATIONS)


@with_setup(skip_for_incompatible_tf)
@raises(Exception, "TF device and DALI device mismatch")
def test_keras_wrong_placement_cpu():
    """A CPU-backed DALI dataset fed to a GPU-placed model must fail."""
    with tf.device('gpu:0'):
        model = mnist.keras_model()
        train_dataset = mnist.get_dataset('cpu', 0)

    model.fit(
        train_dataset,
        epochs=mnist.EPOCHS,
        steps_per_epoch=mnist.ITERATIONS)


@with_setup(skip_for_incompatible_tf)
def test_keras_multi_gpu_mirrored_strategy():
    """MNIST training mirrored over all available GPUs; checks final accuracy."""
    # due to compatibility problems between the driver, cuda version and
    # TensorFlow 2.12 test_keras_multi_gpu_mirrored_strategy doesn't work.
    if LooseVersion(tf.__version__) >= LooseVersion('2.12.0'):
        raise SkipTest("This test is not supported for TensorFlow 2.12")
    strategy = tf.distribute.MirroredStrategy(devices=available_gpus())

    with strategy.scope():
        model = mnist.keras_model()

    train_dataset = mnist.get_dataset_multi_gpu(strategy)

    model.fit(
        train_dataset,
        epochs=mnist.EPOCHS,
        steps_per_epoch=mnist.ITERATIONS)

    # The mirrored run must still reach the single-device accuracy target.
    assert model.evaluate(
        train_dataset,
        steps=mnist.ITERATIONS)[1] > mnist.TARGET


@with_setup(mnist.clear_checkpoints, mnist.clear_checkpoints)
def test_estimators_single_gpu():
    """Estimator-API MNIST training on GPU 0 (checkpoints wiped around the run)."""
    mnist.run_estimators_single_device('gpu', 0)


@with_setup(mnist.clear_checkpoints, mnist.clear_checkpoints)
def test_estimators_single_other_gpu():
    """Estimator-API MNIST training on a secondary GPU."""
    mnist.run_estimators_single_device('gpu', 1)


@with_setup(mnist.clear_checkpoints, mnist.clear_checkpoints)
def test_estimators_single_cpu():
    """Estimator-API MNIST training on the CPU."""
    mnist.run_estimators_single_device('cpu', 0)
|
DALI-main
|
dali/test/python/test_dali_tf_dataset_mnist_eager.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import collections
import nose.case
import nose.inspector
import nose.loader
import nose.suite
import nose.plugins.attrib
# Python 3.10 removed the deprecated `collections.Callable` alias that old nose
# releases still reference; point nose's internal modules at collections.abc
# so that nose keeps working on modern interpreters.
if sys.version_info >= (3, 10) and not hasattr(collections, "Callable"):
    nose.case.collections = collections.abc
    nose.inspector.collections = collections.abc
    nose.loader.collections = collections.abc
    nose.suite.collections = collections.abc
    nose.plugins.attrib.collections = collections.abc
import nose.tools as tools
import re
import fnmatch
def glob_to_regex(glob):
    """Translate a glob pattern into a regex string suitable for ``re.search``.

    Raises ValueError when *glob* is not a string.
    """
    if not isinstance(glob, str):
        raise ValueError("Glob pattern must be a string")
    translated = fnmatch.translate(glob)
    # fnmatch anchors the pattern to the end of the string (``\Z``) because it
    # targets re.match over whole names. The result is used with re.search
    # here, so drop that anchor — enforcing only a suffix match (but not a
    # prefix one) would be inconsistent.
    end_anchor = r"\Z"
    if translated.endswith(end_anchor):
        translated = translated[:-len(end_anchor)]
    return translated
def get_pattern(glob=None, regex=None, match_case=None):
    """Build the pattern used by assert_raises/assert_warns wrappers.

    Exactly one of *glob* / *regex* must be given. String patterns are
    compiled case-insensitively unless ``match_case`` is truthy; an already
    compiled regex is passed through untouched.
    """
    assert glob is not None or regex is not None
    if glob is not None and regex is not None:
        raise ValueError(
            "You should specify at most one of `glob` and `regex` parameters but not both")
    if glob is None:
        # `match_case` only makes sense for patterns we compile ourselves.
        if match_case is not None and not isinstance(regex, str):
            raise ValueError(
                "Regex must be a string if `match_case` is specified when "
                "calling assert_raises_pattern")
        pattern = regex
    else:
        pattern = glob_to_regex(glob)

    if isinstance(pattern, str) and not match_case:  # ignore case by default
        pattern = re.compile(pattern, re.IGNORECASE)
    return pattern
def assert_raises(exception, *args, glob=None, regex=None, match_case=None, **kwargs):
    """
    Wrapper combining `nose.tools.assert_raises` and `nose.tools.assert_raises_regex`.
    Specify ``regex=pattern`` or ``glob=pattern`` to check error message of expected exception
    against the pattern.
    Value for `glob` must be a string, `regex` can be either a literal or compiled regex pattern.
    By default, the check will ignore case, if called with `glob` or a literal for `regex`.
    To enforce case sensitive check pass ``match_case=True``.
    Don't specify `match_case` if passing already compiled regex pattern.
    """
    # No pattern given: plain type-only check.
    if glob is None and regex is None:
        return tools.assert_raises(exception, *args, **kwargs)

    pattern = get_pattern(glob, regex, match_case)
    return tools.assert_raises_regex(exception, pattern, *args, **kwargs)


def assert_warns(exception=Warning, *args, glob=None, regex=None, match_case=None, **kwargs):
    """Warning counterpart of :func:`assert_raises`; same glob/regex semantics."""
    if glob is None and regex is None:
        return tools.assert_warns(exception, *args, **kwargs)

    pattern = get_pattern(glob, regex, match_case)
    return tools.assert_warns_regex(exception, pattern, *args, **kwargs)


def raises(exception, glob=None, regex=None, match_case=None):
    """
    To assert that the test case raises Exception with the message matching given glob pattern
        @raises(Exception, "abc * def")
        def test():
            raise Exception("It's: abc 42 def, and has some suffix.")

    To assert that the test case raises Exception with the message matching given regex pattern
        @raises(Exception, regex="abc[0-9]{2}def")
        def test():
            raise Exception("It's: abc42def, and has some suffix too.")

    You can also use it like regular nose.raises
        @raises(Exception)
        def test():
            raise Exception("This message is not checked")

    By default, the check is not case-sensitive, to change that pass `match_case`=True.
    You can pass a tuple of exception classes to assert that the raised exception is
    an instance of at least one of the classes.
    """
    def decorator(func):
        def new_func(*args, **kwargs):
            # Delegate the actual matching to assert_raises above.
            with assert_raises(exception, glob=glob, regex=regex, match_case=match_case):
                return func(*args, **kwargs)

        # make_decorator preserves the test's name/metadata for nose discovery.
        return tools.make_decorator(func)(new_func)

    return decorator
|
DALI-main
|
dali/test/python/nose_utils.py
|
# Copyright (c) 2019, 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import os
import random
from functools import partial
from nvidia.dali.pipeline import Pipeline
from nvidia.dali import fn, pipeline_def
from nvidia.dali.python_function_plugin import current_dali_stream
test_data_root = os.environ['DALI_EXTRA_PATH']
images_dir = os.path.join(test_data_root, 'db', 'single', 'jpeg')
def setup_pytorch():
    """Import torch lazily and create the module-global CUDA stream used by tests."""
    global torch_dlpack
    global torch
    import torch
    import torch.utils.dlpack as torch_dlpack
    global torch_stream
    torch_stream = torch.cuda.Stream()


def setup_mxnet():
    """Import MXNet's ndarray module lazily into the module-global `mxnd`."""
    global mxnd
    from mxnet import ndarray as mxnd


def setup_cupy():
    """Import cupy lazily and compile the CUDA kernels used by the cupy tests."""
    global cupy
    global cupy_stream
    global square_diff_kernel
    global mix_channels_kernel
    global gray_scale_kernel
    import cupy as cupy
    cupy_stream = cupy.cuda.Stream()
    # z = x^2 - y^2, element-wise.
    square_diff_kernel = cupy.ElementwiseKernel(
        'T x, T y',
        'T z',
        'z = x*x - y*y',
        'square_diff'
    )
    # Alternates between x and y based on the flat element index `i`.
    mix_channels_kernel = cupy.ElementwiseKernel(
        'uint8 x, uint8 y',
        'uint8 z',
        'z = (i % 3) ? x : y',
        'mix_channels'
    )
    # NOTE(review): this kernel reads g/b from the two elements following the
    # r element in a buffer addressed as height x width — for interleaved RGB
    # data those are neighbouring pixels, not channels. The test only compares
    # DALI's output against a direct call of the same kernel, so results stay
    # consistent either way; confirm whether the indexing is intentional.
    gray_scale_kernel = cupy.RawKernel(r'''
    extern "C" __global__
    void gray_scale(float *output, const unsigned char *input, long long height, long long width) {
        int tidx = blockIdx.x * blockDim.x + threadIdx.x;
        int tidy = blockIdx.y * blockDim.y + threadIdx.y;
        if (tidx < width && tidy < height) {
            float r = input[tidy * width + tidx] / 255.;
            float g = input[tidy * width + tidx + 1] / 255.;
            float b = input[tidy * width + tidx + 2] / 255.;
            output[tidy * width + tidx] = 0.299 * r + 0.59 * g + 0.11 * b;
        }
    }
    ''', 'gray_scale')
def random_seed():
    """Return a pseudo-random seed drawn uniformly from [0, 2**32)."""
    seed_range = 1 << 32
    return int(random.random() * seed_range)
DEVICE_ID = 0
BATCH_SIZE = 8
ITERS = 32            # pipeline runs per test case
SEED = random_seed()  # shared so the loading and the op pipelines see identical data
NUM_WORKERS = 6
class CommonPipeline(Pipeline):
    """Shared base: reads JPEGs, decodes, resizes to 400x400; exposes a flip op."""

    def __init__(self, device):
        super().__init__(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, seed=SEED, prefetch_queue_depth=2)
        self.input = ops.readers.File(file_root=images_dir)
        # hw_decoder_load=0 presumably routes all decoding away from the
        # dedicated HW JPEG decoder — confirm against the decoder docs.
        self.decode = ops.decoders.Image(device='mixed' if device == 'gpu' else 'cpu',
                                         output_type=types.RGB, hw_decoder_load=0)
        self.resize = ops.Resize(resize_x=400, resize_y=400, device=device)
        self.flip = ops.Flip(device=device)

    def load(self):
        # Read -> decode -> resize; labels are dropped here on purpose.
        jpegs, labels = self.input()
        decoded = self.decode(jpegs)
        return self.resize(decoded)


class LoadingPipeline(CommonPipeline):
    """Reference pipeline: two loads, the second one flipped — no DLPack op."""

    def __init__(self, device):
        super().__init__(device)

    def define_graph(self):
        im = self.load()
        im2 = self.load()
        return im, self.flip(im2)


class DLTensorOpPipeline(CommonPipeline):
    """Same inputs as LoadingPipeline but routed through DLTensorPythonFunction."""

    def __init__(self, function, device, synchronize=True):
        super(DLTensorOpPipeline, self).__init__(device)
        self.op = ops.DLTensorPythonFunction(function=function, device=device, num_outputs=2,
                                             synchronize_stream=synchronize)

    def define_graph(self):
        im = self.load()
        im2 = self.load()
        return self.op(im, self.flip(im2))
def pytorch_adapter(fun, in1, in2):
    """Run *fun* on torch tensors converted from/back to DLPack capsules.

    All conversions and the user function run on the module-global
    `torch_stream`; the stream is synchronized before the output capsules are
    handed back to DALI so the data is ready for the consumer.
    """
    with torch.cuda.stream(torch_stream):
        tin1 = [torch_dlpack.from_dlpack(dltensor) for dltensor in in1]
        tin2 = [torch_dlpack.from_dlpack(dltensor) for dltensor in in2]
        tout1, tout2 = fun(tin1, tin2)
        out1, out2 = [torch_dlpack.to_dlpack(tout) for tout in tout1], \
                     [torch_dlpack.to_dlpack(tout) for tout in tout2]
    torch_stream.synchronize()
    return out1, out2
def pytorch_wrapper(fun):
    """Wrap *fun* so its batches go through the torch DLPack adapter."""
    def adapted(in1, in2):
        return pytorch_adapter(fun, in1, in2)
    return adapted
def common_case(wrapped_fun, device, compare, synchronize=True):
    """Run the reference and the DLPack-op pipelines side by side and compare.

    `compare(pre1, pre2, post1, post2)` receives the raw inputs and the
    operator outputs (copied to CPU first when running on GPU).
    """
    load_pipe = LoadingPipeline(device)
    op_pipe = DLTensorOpPipeline(wrapped_fun, device, synchronize)
    load_pipe.build()
    op_pipe.build()
    for iter in range(ITERS):
        pre1, pre2 = load_pipe.run()
        post1, post2 = op_pipe.run()
        if device == 'gpu':
            pre1 = pre1.as_cpu()
            pre2 = pre2.as_cpu()
            post1 = post1.as_cpu()
            post2 = post2.as_cpu()
        compare(pre1, pre2, post1, post2)


def pytorch_compare(fun, pre1, pre2, post1, post2):
    """Check DALI's operator outputs against running *fun* directly in torch."""
    torch_pre1 = [torch.from_numpy(pre1.at(i)) for i in range(BATCH_SIZE)]
    torch_pre2 = [torch.from_numpy(pre2.at(i)) for i in range(BATCH_SIZE)]
    torch_post1, torch_post2 = fun(torch_pre1, torch_pre2)
    for i in range(BATCH_SIZE):
        assert numpy.array_equal(post1.at(i), torch_post1[i].numpy())
        assert numpy.array_equal(post2.at(i), torch_post2[i].numpy())


def pytorch_case(fun, device):
    """Convenience: run common_case with the torch adapter and torch comparator."""
    common_case(pytorch_wrapper(fun), device, partial(pytorch_compare, fun))
def simple_pytorch_op(in1, in2):
    """Element-wise sum and difference of two batches, promoted to float32."""
    lhs = [t.to(dtype=torch.float) for t in in1]
    rhs = [t.to(dtype=torch.float) for t in in2]
    sums = []
    diffs = []
    for i in range(len(lhs)):
        sums.append(lhs[i] + rhs[i])
        diffs.append(lhs[i] - rhs[i])
    return sums, diffs
def pytorch_red_channel_op(in1, in2):
    """Keep only channel 0 (red) of each HWC tensor in both batches."""
    def red_channel(batch):
        return [t.narrow(2, 0, 1).squeeze() for t in batch]
    return red_channel(in1), red_channel(in2)
def test_pytorch():
    """Generator test: all torch DLPack cases on both backends, plus GPU suites."""
    setup_pytorch()
    for testcase in [simple_pytorch_op, pytorch_red_channel_op]:
        for device in ['cpu', 'gpu']:
            yield pytorch_case, testcase, device
    yield from _gpu_sliced_torch_suite()
    yield from _gpu_permuted_extents_torch_suite()


def mxnet_adapter(fun, in1, in2):
    """Run *fun* on MXNet ndarrays converted from/back to DLPack capsules."""
    tin1 = [mxnd.from_dlpack(dltensor) for dltensor in in1]
    tin2 = [mxnd.from_dlpack(dltensor) for dltensor in in2]
    tout1, tout2 = fun(tin1, tin2)
    # Read-only capsules are enough — DALI only consumes the outputs.
    return [mxnd.to_dlpack_for_read(tout) for tout in tout1], \
           [mxnd.to_dlpack_for_read(tout) for tout in tout2]


def mxnet_wrapper(fun):
    """Wrap *fun* so its batches go through the MXNet DLPack adapter."""
    return lambda in1, in2: mxnet_adapter(fun, in1, in2)


def mxnet_compare(fun, pre1, pre2, post1, post2):
    """Check DALI's operator outputs against running *fun* directly in MXNet."""
    mxnet_pre1 = [mxnd.array(pre1.at(i)) for i in range(BATCH_SIZE)]
    mxnet_pre2 = [mxnd.array(pre2.at(i)) for i in range(BATCH_SIZE)]
    mxnet_post1, mxnet_post2 = fun(mxnet_pre1, mxnet_pre2)
    for i in range(BATCH_SIZE):
        assert numpy.array_equal(post1.at(i), mxnet_post1[i].asnumpy())
        assert numpy.array_equal(post2.at(i), mxnet_post2[i].asnumpy())


def mxnet_case(fun, device):
    """Convenience: run common_case with the MXNet adapter and comparator."""
    setup_mxnet()
    common_case(mxnet_wrapper(fun), device, partial(mxnet_compare, fun))


def mxnet_flatten(in1, in2):
    """Flatten every tensor in both batches."""
    return [mxnd.flatten(t) for t in in1], [mxnd.flatten(t) for t in in2]


def mxnet_slice(in1, in2):
    """Take channel 1 of the first batch and channel 2 of the second."""
    return [t[:, :, 1] for t in in1], [t[:, :, 2] for t in in2]


def mxnet_cast(in1, in2):
    """Cast batch 1 to float32 and batch 2 to int64."""
    return [mxnd.cast(t, dtype='float32') for t in in1], [mxnd.cast(t, dtype='int64') for t in in2]


def test_mxnet():
    """Generator test: all MXNet DLPack cases on both backends."""
    for testcase in [mxnet_flatten, mxnet_slice, mxnet_cast]:
        for device in ['cpu', 'gpu']:
            yield mxnet_case, testcase, device
def cupy_adapter_sync(fun, in1, in2):
    """Run *fun* on cupy arrays on the module stream and synchronize before return."""
    with cupy_stream:
        tin1 = [cupy.fromDlpack(dltensor) for dltensor in in1]
        tin2 = [cupy.fromDlpack(dltensor) for dltensor in in2]
        tout1, tout2 = fun(tin1, tin2)
        out1, out2 = [tout.toDlpack() for tout in tout1], \
                     [tout.toDlpack() for tout in tout2]
    cupy_stream.synchronize()
    return out1, out2


def cupy_adapter(fun, in1, in2):
    """Run *fun* on cupy arrays without any explicit stream management."""
    tin1 = [cupy.fromDlpack(dltensor) for dltensor in in1]
    tin2 = [cupy.fromDlpack(dltensor) for dltensor in in2]
    tout1, tout2 = fun(tin1, tin2)
    return [tout.toDlpack() for tout in tout1], \
           [tout.toDlpack() for tout in tout2]


def cupy_wrapper(fun, synchronize):
    """Pick the synchronizing or the bare cupy adapter for *fun*."""
    if synchronize:
        return lambda in1, in2: cupy_adapter_sync(fun, in1, in2)
    else:
        return lambda in1, in2: cupy_adapter(fun, in1, in2)


def cupy_compare(fun, synchronize, pre1, pre2, post1, post2):
    """Check DALI's operator outputs against running *fun* directly in cupy.

    In the non-synchronizing variant the reference runs on its own stream that
    is synchronized explicitly, mirroring how the operator invokes *fun*.
    """
    cupy_pre1 = [cupy.asarray(pre1.at(i)) for i in range(BATCH_SIZE)]
    cupy_pre2 = [cupy.asarray(pre2.at(i)) for i in range(BATCH_SIZE)]
    if synchronize:
        cupy_post1, cupy_post2 = fun(cupy_pre1, cupy_pre2)
    else:
        stream = cupy.cuda.Stream()
        cupy_post1, cupy_post2 = fun(cupy_pre1, cupy_pre2, stream=stream)
        stream.synchronize()
    for i in range(BATCH_SIZE):
        assert post1.at(i).shape == cupy_post1[i].shape
        assert post2.at(i).shape == cupy_post2[i].shape
        assert numpy.array_equal(post1.at(i), cupy.asnumpy(cupy_post1[i]))
        assert numpy.array_equal(post2.at(i), cupy.asnumpy(cupy_post2[i]))


def cupy_case(fun, synchronize=True):
    """Convenience: run common_case on GPU with the cupy adapter and comparator."""
    common_case(cupy_wrapper(fun, synchronize), 'gpu',
                partial(cupy_compare, fun, synchronize), synchronize)


def cupy_simple(in1, in2):
    """sin(x*y) and cos(x*y) of the two batches, in float32."""
    fin1 = [arr.astype(cupy.float32) for arr in in1]
    fin2 = [arr.astype(cupy.float32) for arr in in2]
    return [cupy.sin(fin1[i] * fin2[i]).astype(cupy.float32) for i in range(BATCH_SIZE)], \
           [cupy.cos(fin1[i] * fin2[i]).astype(cupy.float32) for i in range(BATCH_SIZE)]


def gray_scale_call(input):
    """Launch the gray_scale RawKernel over one image on the current stream."""
    height = input.shape[0]
    width = input.shape[1]
    output = cupy.ndarray((height, width), dtype=cupy.float32)
    gray_scale_kernel(grid=((height + 31) // 32, (width + 31) // 32),
                      block=(32, 32),
                      stream=cupy.cuda.get_current_stream(),
                      args=(output, input, height, width))
    return output


def cupy_kernel_square_diff(in1, in2):
    """Apply the square_diff ElementwiseKernel; second batch passes through."""
    fin1 = [arr.astype(cupy.float32) for arr in in1]
    fin2 = [arr.astype(cupy.float32) for arr in in2]
    out1, out2 = [square_diff_kernel(fin1[i], fin2[i]) for i in range(BATCH_SIZE)], in2
    return out1, out2


def cupy_kernel_mix_channels(in1, in2):
    """Apply the mix_channels ElementwiseKernel; second batch passes through."""
    return [mix_channels_kernel(in1[i], in2[i]) for i in range(BATCH_SIZE)], in2


def cupy_kernel_gray_scale(in1, in2, stream=None):
    """Grayscale both batches on the stream DALI (or the caller) provides."""
    if stream is None:
        # Borrow DALI's current stream by pointer so the kernels are ordered
        # with the pipeline's work.
        stream = ops.PythonFunction.current_stream()
        s = cupy.cuda.Stream()
        s.ptr = stream.ptr
    with s:
        out1 = [gray_scale_call(arr) for arr in in1]
        out2 = [gray_scale_call(arr) for arr in in2]
    # Detach the borrowed pointer so cupy does not destroy DALI's stream.
    s.ptr = 0
    return out1, out2


def test_cupy():
    """Generator test: cupy DLPack cases (GPU only) plus negative-stride suite."""
    setup_cupy()
    print(cupy)
    for testcase in [cupy_simple, cupy_kernel_square_diff, cupy_kernel_mix_channels]:
        yield cupy_case, testcase
    yield from _cupy_flip_with_negative_strides_suite()


def test_cupy_kernel_gray_scale():
    """Non-synchronizing variant: the user function manages the stream itself."""
    setup_cupy()
    cupy_case(cupy_kernel_gray_scale, synchronize=False)
# ---------------- test strided copy kernel with strided tensors -----------------
def get_random_torch_batch(g, shapes, dtype):
    """Create one random torch tensor per shape, reproducible via generator *g*.

    Floating-point dtypes sample uniformly from [0, 1); integer dtypes sample
    uniformly over the dtype's full range.
    """
    probe = torch.tensor([], dtype=dtype)
    if torch.is_floating_point(probe):
        return [torch.rand(shape, generator=g, dtype=dtype) for shape in shapes]
    info = torch.iinfo(dtype)
    return [torch.randint(info.min, info.max, shape, generator=g, dtype=dtype)
            for shape in shapes]
def get_sliced_torch_case(case_name):
    """Return (shapes, slices) for the named strided-slicing test case."""
    # [(extents of the original shape), (slice of the corresponding extent)]
    # the original extents and slice shapes are purposely all prime numbers
    # to test handling of unaligned tensors
    prime_images = [
        ((107, 181, 3), (slice(1, 102), slice(179), slice(None))),
        ((1097, 227, 5), (slice(None), slice(None), slice(1, 4))),
        ((107, 167, 1), (slice(1, 14), slice(None), slice(None))),
        ((107, 23, 3), (slice(103), slice(None), slice(None))),
        ((173, 23, 5), (slice(None), slice(None), slice(1, 1))),
        ((401, 167, 5), (slice(4, 167), slice(None), slice(0, 3))),
        ((181, 401, 5), (slice(2, None), slice(397), slice(None))),
        ((181, 107, 1), (slice(179), slice(103), slice(1))),
        ((373, 181, 5), (slice(None), slice(None), slice(None, None, 2))),
        ((199, 401, 3), (slice(None), slice(None), slice(None))),
        ((167, 1097, 1), (slice(8, None, 7), slice(24, None, 23), slice(None))),
        ((181, 61, 1), (slice(179), slice(58, None), slice(None))),
        ((401, 61, 1), (slice(397), slice(None), slice(None))),
        ((373, 173, 1), (slice(None), slice(167), slice(None))),
        ((173, 199, 3), (slice(None), slice(None), slice(2, 3))),
        ((181, 1097, 1), (slice(2, None, None), slice(1093), slice(None))),
    ]
    prime_grey_images = [((199, 23), (slice(None, 173, None), slice(None, 19, None))),
                         ((373, 373), (slice(None, 331, None), slice(42, None, None))),
                         ((1097, 181), (slice(114, None, None), slice(None, 157, None))),
                         ((61, 227), (slice(None, 53, None), slice(28, None, None))),
                         ((1097, 61), (slice(114, None, None), slice(None, 53, None))),
                         ((181, 199), (slice(None, 157, None), slice(None, 173, None))),
                         ((1097, 1097), (slice(114, None, None), slice(None, 983, None))),
                         ((373, 227), (slice(42, None, None), slice(None, 199, None))),
                         ((227, 173), (slice(None, 199, None), slice(None, 151, None))),
                         ((227, 173), (slice(None, 199, None), slice(22, None, None))),
                         ((401, 173), (slice(42, None, None), slice(None, 151, None))),
                         ((107, 23), (slice(18, None, None), slice(None, 19, None))),
                         ((23, 199), (slice(4, None, None), slice(26, None, None))),
                         ((199, 23), (slice(26, None, None), slice(4, None, None))),
                         ((227, 23), (slice(None, 199, None), slice(None, 19, None))),
                         ((23, 23), (slice(4, None, None), slice(4, None, None))),
                         ((167, 181), (slice(18, None, None), slice(24, None, None))),
                         ((167, 181), (slice(18, None, None), slice(24, None, None))),
                         ((181, 227), (slice(None, 157, None), slice(None, 199, None))),
                         ((401, 199), (slice(None, 359, None), slice(None, 173, None))),
                         ((107, 181), (slice(None, 89, None), slice(None, 157, None))),
                         ((173, 61), (slice(None, 151, None), slice(8, None, None))),
                         ((227, 167), (slice(None, 199, None), slice(18, None, None))),
                         ((173, 401), (slice(22, None, None), slice(None, 359, None))),
                         ((23, 227), (slice(4, None, None), slice(28, None, None))),
                         ((227, 23), (slice(28, None, None), slice(4, None, None))),
                         ((373, 373), (slice(42, None, None), slice(None, 331, None))),
                         ((61, 107), (slice(None, 53, None), slice(18, None, None))),
                         ((181, 61), (slice(24, None, None), slice(None, 53, None))),
                         ((107, 181), (slice(None, 89, None), slice(24, None, None))),
                         ((401, 23), (slice(42, None, None), slice(4, None, None))),
                         ((373, 401), (slice(None, 331, None), slice(42, None, None)))]
    # Video-like: prepend a frames extent (kept whole by the slice).
    vid = [((17, ) + shape, (slice(None), ) + sl) for shape, sl in prime_images]
    # 11 leading extents of size 1 (one of them 3) to stress high-rank handling.
    ndim_11 = [(tuple(3 if i == j else 1 for j in range(11)) + shape, ((slice(None), ) * 11) + sl)
               for i, (shape, sl) in enumerate(prime_images)]
    cases = {
        "slice_images": prime_images,
        "slice_grey_images": prime_grey_images,
        "slice_vid": vid,
        "slice_ndim_11": ndim_11
    }
    shape_slices = cases[case_name]
    shapes, slices = tuple(zip(*shape_slices))
    assert len(shapes) == len(slices) == len(shape_slices)
    return shapes, slices


def _gpu_sliced_torch_case(case_name, dtype, g):
    """DALI must copy non-contiguous (sliced) torch views correctly on GPU."""
    shapes, slices = get_sliced_torch_case(case_name)
    input_batch = get_random_torch_batch(g, shapes, dtype)
    assert len(input_batch) == len(shapes)

    # returns sliced view of the input tensors
    def sliced_tensor(batch):
        stream = current_dali_stream()
        torch_stream = torch.cuda.ExternalStream(stream)
        with torch.cuda.stream(torch_stream):
            tensors = [torch_dlpack.from_dlpack(t) for t in batch]
            assert len(tensors) == len(slices)
            tensor_views = [t[sl] for t, sl in zip(tensors, slices)]
            out = [torch_dlpack.to_dlpack(t) for t in tensor_views]
        return out

    @pipeline_def(batch_size=len(input_batch), num_threads=4, device_id=0)
    def pipeline():
        data = fn.external_source(lambda: input_batch)
        data = fn.dl_tensor_python_function(data.gpu(), batch_processing=True,
                                            function=sliced_tensor, synchronize_stream=False)
        return data

    p = pipeline()
    p.build()
    out, = p.run()
    out = [numpy.array(sample) for sample in out.as_cpu()]
    # Reference: apply the same slices on the CPU with numpy.
    ref = [numpy.array(sample)[sl] for sample, sl in zip(input_batch, slices)]
    numpy.testing.assert_equal(out, ref)
def _gpu_sliced_torch_suite():
g = torch.Generator()
g.manual_seed(42)
for case_name in ("slice_images", "slice_grey_images", "slice_vid", "slice_ndim_11"):
for dtype in (torch.uint8, torch.int16, torch.float32, torch.float64):
yield _gpu_sliced_torch_case, case_name, dtype, g
def get_permute_extents_case(case_name):
    """Return a list of (shape, extent-permutation) pairs for the named case."""
    rng = random.Random(44)

    def permuted(it):
        # Shuffled copy of a sequence (deterministic through `rng`).
        copy = list(it)
        rng.shuffle(copy)
        return tuple(copy)

    def permuted_extents(ndim):
        # Random permutation of extent indices 0..ndim-1.
        extents = list(range(ndim))
        rng.shuffle(extents)
        return tuple(extents)

    # the original extents are purposely all prime numbers
    # to test handling of unaligned tensors
    prime_images = [
        (199, 181, 3),
        (1097, 61, 5),
        (373, 373, 1),
        (107, 23, 3),
        (173, 23, 5),
        (401, 167, 5),
        (181, 401, 5),
        (181, 107, 1),
        (373, 181, 5),
        (199, 401, 3),
        (1097, 1097, 1),
        (181, 61, 1),
        (401, 61, 1),
        (373, 173, 1),
        (227, 199, 3),
        (181, 1097, 1),
    ]
    if case_name == "transpose_channels_image":
        prime_images_transposed_channel = list(zip(prime_images, [(2, 0, 1)] * len(prime_images)))
        assert len(prime_images_transposed_channel) == len(prime_images)
        return prime_images_transposed_channel
    if case_name == "transpose_hw_image":
        prime_images_transposed_hw = list(zip(prime_images, [(1, 0, 2)] * len(prime_images)))
        assert len(prime_images_transposed_hw) == len(prime_images)
        return prime_images_transposed_hw
    if case_name == "image_random_permutation":
        prime_images_rnd_permuted = list(
            zip(prime_images, [permuted_extents(3) for _ in range(len(prime_images))]))
        assert len(prime_images_rnd_permuted) == len(prime_images)
        return prime_images_rnd_permuted
    if case_name == "transpose_channels_video":
        prime_vid_like = [
            (13, 199, 181, 3),
            (3, 1097, 61, 5),
            (17, 373, 373, 1),
            (5, 107, 23, 3),
            (11, 173, 23, 5),
            (11, 401, 167, 5),
            (7, 181, 401, 5),
            (5, 181, 107, 1),
            (3, 373, 181, 5),
            (23, 199, 401, 3),
            (3, 1097, 1097, 1),
            (31, 181, 61, 1),
            (17, 401, 61, 1),
            (5, 373, 173, 1),
            (3, 227, 199, 3),
            (7, 181, 1097, 1),
        ]
        prime_vid_like_transposed_channel = list(
            zip(prime_vid_like, [(3, 0, 1, 2)] * len(prime_vid_like)))
        assert len(prime_vid_like_transposed_channel) == len(prime_vid_like)
        return prime_vid_like_transposed_channel
    if case_name == "ndim_6_permute_outermost_3":
        # optimization to early stop translation of flat output index to flat input index
        # should kick in, test if that's fine
        ndim_6_transpose_outermost = [(permuted([3, 5, 7, 11, 13, 17]), permuted_extents(3) +
                                       (3, 4, 5)) for _ in range(5)]
        assert len(ndim_6_transpose_outermost) == 5
        return ndim_6_transpose_outermost
    if case_name == "ndim_6_permute_all":
        ndim_6_rnd_permuted = [(permuted([3, 5, 7, 11, 13, 17]), permuted_extents(6))
                               for _ in range(32)]
        assert len(ndim_6_rnd_permuted) == 32
        return ndim_6_rnd_permuted
    if case_name == "ndim_15_permute_all":
        # max ndim supported
        ndim_15_rnd_permuted = [(permuted([3, 5, 7, 11, 13, 17, 1, 1, 1, 1, 1, 1, 1, 1,
                                           1]), permuted_extents(15)) for _ in range(32)]
        assert len(ndim_15_rnd_permuted) == 32
        return ndim_15_rnd_permuted


def _gpu_permuted_extents_torch_case(case_name, dtype, g):
    """DALI must copy extent-permuted (transposed) torch views correctly on GPU."""
    shapes_perms = get_permute_extents_case(case_name)
    shapes, perms = tuple(zip(*shapes_perms))
    assert len(shapes) == len(perms) == len(shapes_perms)
    input_batch = get_random_torch_batch(g, shapes, dtype)
    assert len(input_batch) == len(shapes)

    # returns permuted view of the input tensors
    def permuted_tensors(batch):
        stream = current_dali_stream()
        torch_stream = torch.cuda.ExternalStream(stream)
        with torch.cuda.stream(torch_stream):
            tensors = [torch_dlpack.from_dlpack(t) for t in batch]
            assert len(tensors) == len(perms)
            tensor_views = [t.permute(perm) for t, perm in zip(tensors, perms)]
            out = [torch_dlpack.to_dlpack(t) for t in tensor_views]
        return out

    @pipeline_def(batch_size=len(input_batch), num_threads=4, device_id=0)
    def pipeline():
        data = fn.external_source(lambda: input_batch)
        data = fn.dl_tensor_python_function(data.gpu(), batch_processing=True,
                                            function=permuted_tensors, synchronize_stream=False)
        return data

    p = pipeline()
    p.build()
    out, = p.run()
    out = [numpy.array(sample) for sample in out.as_cpu()]
    # Reference: the same permutation applied via numpy transpose on the CPU.
    ref = [numpy.array(sample).transpose(perm) for sample, perm in zip(input_batch, perms)]
    numpy.testing.assert_equal(out, ref)
def _gpu_permuted_extents_torch_suite():
g = torch.Generator()
g.manual_seed(44)
for case_name in (
"transpose_channels_image",
"transpose_hw_image",
"image_random_permutation",
"transpose_channels_video",
"ndim_6_permute_outermost_3",
"ndim_6_permute_all",
"ndim_15_permute_all",
):
for dtype in (torch.uint8, torch.int16, torch.int32, torch.float64):
yield _gpu_permuted_extents_torch_case, case_name, dtype, g
def _cupy_negative_strides_case(dtype, batch_size, steps):
    """Check that DALI accepts DLPack tensors with negative or non-unit strides.

    Decoded images are sliced in cupy with the given ``steps`` (possibly
    negative, producing flipped, non-contiguous views) inside
    ``fn.dl_tensor_python_function`` and compared against the same slicing
    done natively in DALI.
    """
    @pipeline_def(batch_size=batch_size, num_threads=4, device_id=0, seed=42)
    def baseline_pipeline():
        img, _ = fn.readers.file(name="Reader", file_root=images_dir, random_shuffle=True, seed=42)
        img = fn.decoders.image(img, device="mixed")
        img = fn.cast(img, dtype=dtype)
        # Native DALI slicing - this is the reference result.
        img = img[tuple(slice(None, None, step) for step in steps)]
        return img

    def flip_cupy(dlps):
        # Do the cupy work on DALI's current stream so no extra sync is needed
        # (hence synchronize_stream=False below).
        stream = current_dali_stream()
        cp_stream = cupy.cuda.ExternalStream(stream, device_id=0)
        with cp_stream:
            imgs = [cupy.from_dlpack(dlp) for dlp in dlps]
            # Strided / negative-step slicing creates non-contiguous views.
            imgs = [img[tuple(slice(None, None, step) for step in steps)] for img in imgs]
            imgs = [img.toDlpack() for img in imgs]
            return imgs

    @pipeline_def(batch_size=batch_size, num_threads=4, device_id=0, seed=42)
    def pipeline():
        img, _ = fn.readers.file(name="Reader", file_root=images_dir, random_shuffle=True, seed=42)
        img = fn.decoders.image(img, device="mixed")
        img = fn.cast(img, dtype=dtype)
        img = fn.dl_tensor_python_function(img, batch_processing=True, function=flip_cupy,
                                           synchronize_stream=False)
        return img

    p = pipeline()
    p.build()
    baseline = baseline_pipeline()
    baseline.build()
    # Both pipelines share the same seed, so they see the same shuffled files.
    for _ in range(5):
        batch, = p.run()
        baseline_batch, = baseline.run()
        batch = [numpy.array(sample) for sample in batch.as_cpu()]
        baseline_batch = [numpy.array(sample) for sample in baseline_batch.as_cpu()]
        assert len(batch) == len(baseline_batch) == batch_size
        for sample, baseline_sample in zip(batch, baseline_batch):
            numpy.testing.assert_equal(sample, baseline_sample)
def _cupy_flip_with_negative_strides_suite():
    """Yield flip-via-negative-strides cases over various dtypes, batch sizes and steps."""
    cases = [
        (types.DALIDataType.UINT8, 4, (-1, -1, None)),
        (types.DALIDataType.UINT8, 16, (-1, None, None)),
        (types.DALIDataType.UINT8, 2, (None, None, -1)),
        (types.DALIDataType.UINT8, 5, (-1, -1, -1)),
        (types.DALIDataType.UINT8, 16, (-2, -2, None)),
        (types.DALIDataType.UINT16, 11, (None, -1, None)),
        (types.DALIDataType.FLOAT, 16, (2, -2, None)),
        (types.DALIDataType.INT32, 12, (-2, None, None)),
        (types.DALIDataType.FLOAT64, 11, (-2, 4, -1)),
    ]
    for case in cases:
        dtype, batch_size, steps = case
        yield _cupy_negative_strides_case, dtype, batch_size, steps
|
DALI-main
|
dali/test/python/test_dltensor_operator.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import numpy as np
import nvidia.dali as dali
import nvidia.dali.types as types
# Paths to the test video datasets (prepared by the CI environment).
video_directory = '/tmp/labelled_videos/'
video_directory_multiple_resolutions = '/tmp/video_resolution/'

# Common pipeline construction arguments; batch size is supplied per test.
pipeline_params = {
    'num_threads': 8,
    'device_id': 0,
    'seed': 0
}

# Video reader configurations: a fixed-resolution dataset and a
# multi-resolution one (shorter sequences for the latter).
video_reader_params = [{
    'device': 'gpu',
    'file_root': video_directory,
    'sequence_length': 32,
    'random_shuffle': False
}, {
    'device': 'gpu',
    'file_root': video_directory_multiple_resolutions,
    'sequence_length': 8,
    'random_shuffle': False
}]

# Resize configurations covering the different ways of specifying the output
# size (x/y, x only, shorter/longer side) and different filters.
resize_params = [{
    'resize_x': 300,
    'resize_y': 200,
    'interp_type': types.DALIInterpType.INTERP_CUBIC,
    'minibatch_size': 8
}, {
    'resize_x': 300,
    'interp_type': types.DALIInterpType.INTERP_CUBIC,
    'minibatch_size': 8
}, {
    'resize_x': 300,
    'resize_y': 200,
    'interp_type': types.DALIInterpType.INTERP_LANCZOS3,
    'minibatch_size': 8
}, {
    'resize_shorter': 300,
    'interp_type': types.DALIInterpType.INTERP_CUBIC,
    'minibatch_size': 8
}, {
    'resize_longer': 500,
    'interp_type': types.DALIInterpType.INTERP_CUBIC,
    'minibatch_size': 8
}, {
    'resize_x': 300,
    'resize_y': 200,
    'min_filter': types.DALIInterpType.INTERP_CUBIC,
    'mag_filter': types.DALIInterpType.INTERP_TRIANGULAR,
    'minibatch_size': 8
}, {
    'resize_x': 300,
    'resize_y': 200,
    'interp_type': types.DALIInterpType.INTERP_CUBIC,
    'minibatch_size': 4
}]
def video_reader_pipeline_base(
        video_reader, batch_size, video_reader_params, resize_params=None):
    """Build, construct and return a pipeline wrapping the given video reader.

    Args:
        video_reader: a DALI reader entry point, e.g. ``dali.fn.readers.video``
            or ``dali.fn.readers.video_resize``.
        batch_size: pipeline batch size.
        video_reader_params: kwargs forwarded to the reader.
        resize_params: optional extra kwargs (resize arguments). Defaults to
            no extra arguments. (Was a mutable-default ``{}`` - a classic
            Python pitfall, replaced by a ``None`` sentinel.)

    Returns:
        A built ``dali.pipeline.Pipeline`` whose single output is the frame
        sequence (labels, if returned by the reader, are dropped).
    """
    if resize_params is None:
        resize_params = {}
    pipeline = dali.pipeline.Pipeline(batch_size=batch_size, **pipeline_params)
    with pipeline:
        outputs = video_reader(**video_reader_params, **resize_params)
        # Readers with labels return a list [frames, labels, ...]; keep frames.
        if isinstance(outputs, list):
            outputs = outputs[0]
        pipeline.set_outputs(outputs)
    pipeline.build()
    return pipeline
def video_reader_resize_pipeline(batch_size, video_reader_params, resize_params):
    """Build a pipeline using the fused video_resize reader."""
    return video_reader_pipeline_base(
        dali.fn.readers.video_resize, batch_size, video_reader_params, resize_params)
def video_reader_pipeline(batch_size, video_reader_params):
    """Build a pipeline using the plain (non-resizing) video reader."""
    reader = dali.fn.readers.video
    return video_reader_pipeline_base(reader, batch_size, video_reader_params)
def ground_truth_pipeline(batch_size, video_reader_params, resize_params):
    """Build a reference pipeline: read sequences with the plain video reader,
    then resize every frame individually with ``fn.resize``.

    The inner video-reading pipeline is driven lazily by the external-source
    generator below, one frame at a time, with batch size 1, so each call to
    ``run()`` on the returned pipeline yields a single resized frame.
    """
    pipeline = video_reader_pipeline(batch_size, video_reader_params)

    def get_next_frame():
        # Pull one whole batch of sequences and yield it frame by frame,
        # each frame wrapped as a batch of one.
        pipe_out, = pipeline.run()
        sequences_out = pipe_out.as_cpu().as_array()
        for sample in range(batch_size):
            for frame in range(video_reader_params['sequence_length']):
                yield [np.expand_dims(sequences_out[sample][frame], 0)]

    gt_pipeline = dali.Pipeline(batch_size=1, **pipeline_params)
    with gt_pipeline:
        resized_frame = dali.fn.external_source(
            source=get_next_frame, num_outputs=1)
        resized_frame = resized_frame[0].gpu()
        resized_frame = dali.fn.resize(
            resized_frame, **resize_params)
        gt_pipeline.set_outputs(resized_frame)
    gt_pipeline.build()
    return gt_pipeline
def compare_video_resize_pipelines(pipeline, gt_pipeline, batch_size, video_length):
    """Compare one batch of `pipeline` frame-by-frame against `gt_pipeline`.

    `gt_pipeline` yields one resized frame per ``run()``; it is advanced once
    for every frame of every sample of the tested batch. Raises AssertionError
    on the first shape or content mismatch.

    Fixes: removed the dead ``global_sample_id`` counter and collapsed the
    redundant if/else that asserted the same shape condition on both branches.
    """
    batch_gpu, = pipeline.run()
    batch = batch_gpu.as_cpu()
    for sample_id in range(batch_size):
        sample = batch.at(sample_id)
        for frame_id in range(video_length):
            frame = sample[frame_id]
            gt_frame = gt_pipeline.run()[0].as_cpu().as_array()[0]
            # Check shapes first so a size mismatch gives a clear message
            # instead of a broadcasting error in the content comparison.
            assert gt_frame.shape == frame.shape, \
                f"Shapes are not equal: {gt_frame.shape} != {frame.shape}"
            assert (gt_frame == frame).all(), "Images are not equal"
def run_for_params(batch_size, video_reader_params, resize_params):
    """Compare the fused video_resize reader against the per-frame resize reference."""
    seq_len = video_reader_params['sequence_length']
    tested = video_reader_resize_pipeline(batch_size, video_reader_params, resize_params)
    reference = ground_truth_pipeline(batch_size, video_reader_params, resize_params)
    compare_video_resize_pipelines(tested, reference, batch_size, seq_len)
    # The intermediate pipeline from ground_truth_pipeline gets entangled in cell objects
    # and is not automatically destroyed. The pipeline outputs are kept alive,
    # effectively leaking large amounts of GPU memory.
    gc.collect()
def test_video_resize(batch_size=2):
    """Yield a comparison case for every (reader params, resize params) combination."""
    cases = ((vp, rp) for vp in video_reader_params for rp in resize_params)
    for vp, rp in cases:
        yield run_for_params, batch_size, vp, rp
|
DALI-main
|
dali/test/python/test_video_reader_resize.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import psutil
import weakref
def capture_processes(pool):
    """Need to be called to register the processes created by the pool. It is later used
    by the teardown_function to check if no process stayed alive after the test finished.
    """
    global pool_processes
    global pool_threads
    global pools
    if pool is not None:
        # Keep only a weak reference so the pool object itself can still be
        # garbage-collected; teardown checks whether collection happened.
        pools.append(weakref.ref(pool))
        pool_processes.extend(pool.pids())
        proc_pool = pool.pool
        # Track the pool's observer thread so teardown can verify it exited.
        # NOTE(review): relies on the private `_observer` attribute of the pool.
        pool_threads.append(proc_pool._observer.thread)
    check_shm_for_dali("All shm chunks should be closed after initial pool setup, found {}")
def setup_function():
    """Prepare for the check if all started processes are no longer children of current process
    """
    global pool_processes
    global pool_threads
    global pools
    pool_processes, pool_threads, pools = [], [], []
def teardown_function():
    """Check if there are no children processes started by the test after it ended.
    Be sure to call `capture_processes` in the test.
    """
    global pool_processes
    global pool_threads
    global pools
    # Message typo fixed: "where tracked" -> "were tracked".
    assert len(pool_processes), "No processes were tracked - did the test call capture_processes?"
    # Weak refs that are still live mean the pool objects leaked past the test.
    pools_not_collected = [pool_ref() is not None for pool_ref in pools]
    current_process = psutil.Process()
    children_pids = [process.pid for process in current_process.children()]
    left = set(pool_processes).intersection(children_pids)
    assert len(left) == 0, (
        f"Pipeline-started processes left after test is finished, pids alive: {left},\n"
        f"pids started during tests: {pool_processes}.\n"
        f"Pools not collected: {sum(pools_not_collected)}")
    alive_threads = [thread.is_alive() for thread in pool_threads]
    # Converted from str.format to an f-string for consistency with the
    # assertion above; the emitted message is unchanged.
    assert sum(alive_threads) == 0, (
        f"Some pool related threads are left after the test finished. "
        f"Started in test suite: {len(pool_threads)}, still active: {sum(alive_threads)}. "
        f"Active threads map in the order of creation {alive_threads}")
def check_shm_for_dali(msg):
    """Assert that no DALI shared-memory handles exist under the usual shm mounts.

    `msg` is a format string with one ``{}`` placeholder that receives the
    full path of the offending handle.
    """
    for shm_path in ("/dev/shm/", "/run/shm/"):
        if not os.path.isdir(shm_path):
            continue
        for handle in os.listdir(shm_path):
            assert "nvidia_dali_" not in handle, msg.format(shm_path + handle)
def setup_module():
    """Fail fast if stale DALI shm handles exist before the suite runs."""
    msg = ("Expected clear shared mem environment before starting tests, "
           "found old DALI file handle: {}")
    check_shm_for_dali(msg)
def teardown_module():
    """Verify the whole suite left no DALI shm handles behind."""
    msg = "Test left opened shared memory file handle: {}"
    check_shm_for_dali(msg)
|
DALI-main
|
dali/test/python/test_pool_utils.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nose_utils # noqa:F401
import os
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.math as math
import nvidia.dali.types as types
from nvidia.dali.pipeline import Pipeline
from test_utils import get_dali_extra_path
test_data_root = get_dali_extra_path()
def check_bbox_random_crop_adjust_polygons(file_root,
                                           annotations_file,
                                           batch_size=3,
                                           num_iters=4,
                                           num_threads=4,
                                           device_id=0,
                                           seed=1234):
    """Verify that polygon masks selected by ``fn.random_bbox_crop`` match the
    surviving bounding boxes, in both relative and absolute coordinates.

    Builds a COCO-reading pipeline that crops a random window, selects the
    masks of the surviving boxes and remaps their vertices to the crop window,
    then recomputes the expected results with numpy and compares.

    Fixes: the inner vertex loop reused ``j``, shadowing the outer per-sample
    loop variable; renamed to ``v_idx``. Unused iteration index renamed to
    ``_``. Comment typo "Chekc" corrected.
    """
    pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed=seed)
    with pipe:
        # Read data from COCO
        # ratio=True means both bboxes and masks coordinates will be
        # relative to the image dimensions (range [0.0, 1.0])
        inputs, in_bboxes, labels, in_polygons, in_vertices = \
            fn.readers.coco(
                file_root=file_root, annotations_file=annotations_file, shard_id=0, num_shards=1,
                ratio=True, ltrb=True, polygon_masks=True
            )
        # Generate a random crop. out_bboxes are adjusted to the crop window
        slice_anchor, slice_shape, out_bboxes, labels, bbox_indices = \
            fn.random_bbox_crop(
                in_bboxes, labels,
                aspect_ratio=[0.5, 2.0], thresholds=[0, 0.1, 0.3, 0.5, 0.7, 0.9],
                scaling=[0.3, 1.0], bbox_layout='xyXY', output_bbox_indices=True
            )
        # Crop the image
        _ = fn.decoders.image_slice(
            inputs, slice_anchor, slice_shape,
            device='mixed', axis_names='WH'
        )
        sel_polygons, sel_vertices = fn.segmentation.select_masks(
            bbox_indices, in_polygons, in_vertices
        )
        # Adjust masks coordinates to the coordinate space of the cropped image
        MT = fn.transforms.crop(from_start=slice_anchor, from_end=(slice_anchor + slice_shape))
        out_vertices = fn.coord_transform(sel_vertices, MT=MT)
        # Converting to absolute coordinates (demo purposes)
        image_shape = fn.peek_image_shape(inputs, dtype=types.FLOAT)
        h = fn.slice(image_shape, 0, 1, axes=[0])
        w = fn.slice(image_shape, 1, 1, axes=[0])
        # Original bboxes
        bbox_x = fn.slice(in_bboxes, 0, 1, axes=[1])
        bbox_y = fn.slice(in_bboxes, 1, 1, axes=[1])
        bbox_X = fn.slice(in_bboxes, 2, 1, axes=[1])
        bbox_Y = fn.slice(in_bboxes, 3, 1, axes=[1])
        in_bboxes_abs = fn.cat(bbox_x * w, bbox_y * h, bbox_X * w, bbox_Y * h, axis=1)
        # Transform to convert relative coordinates to absolute
        scale_rel_to_abs = fn.transforms.scale(scale=fn.cat(w, h))
        # Selected vertices (relative coordinates)
        sel_vertices_abs = fn.coord_transform(out_vertices, MT=scale_rel_to_abs)
        # Output bboxes
        bbox2_x = fn.slice(out_bboxes, 0, 1, axes=[1])
        bbox2_y = fn.slice(out_bboxes, 1, 1, axes=[1])
        bbox2_X = fn.slice(out_bboxes, 2, 1, axes=[1])
        bbox2_Y = fn.slice(out_bboxes, 3, 1, axes=[1])
        out_bboxes_abs = fn.cat(bbox2_x * w, bbox2_y * h, bbox2_X * w, bbox2_Y * h, axis=1)
        # Output vertices (absolute coordinates)
        out_vertices_abs = fn.coord_transform(out_vertices, MT=scale_rel_to_abs)
        # Clamped coordinates
        out_vertices_clamped = math.clamp(out_vertices, 0.0, 1.0)
        out_vertices_clamped_abs = fn.coord_transform(out_vertices_clamped, MT=scale_rel_to_abs)

    pipe.set_outputs(in_vertices, sel_vertices, sel_vertices_abs,
                     out_vertices, out_vertices_clamped, out_vertices_abs,
                     out_vertices_clamped_abs,
                     in_bboxes, in_bboxes_abs, out_bboxes, out_bboxes_abs,
                     in_polygons, sel_polygons,
                     image_shape, slice_anchor, slice_shape, bbox_indices)
    pipe.build()
    # Enough iterations to see an example with more than one bounding box
    for _ in range(num_iters):
        outs = pipe.run()
        for j in range(batch_size):
            (
                in_vertices, sel_vertices, sel_vertices_abs,
                out_vertices, out_vertices_clamped, out_vertices_abs, out_vertices_clamped_abs,
                in_bboxes, in_bboxes_abs, out_bboxes, out_bboxes_abs,
                in_polygons, sel_polygons,
                image_shape, slice_anchor, slice_shape, bbox_indices
            ) = (outs[k].at(j) for k in range(len(outs)))
            # Checking that the output polygon descriptors are the ones associated with the
            # selected bounding boxes
            expected_polygons_list = []
            expected_vertices_list = []
            ver_count = 0
            for k in range(in_polygons.shape[0]):
                mask_id = in_polygons[k][0]
                in_ver_start_idx = in_polygons[k][1]
                in_ver_end_idx = in_polygons[k][2]
                pol_nver = in_ver_end_idx - in_ver_start_idx
                if mask_id in bbox_indices:
                    expected_polygons_list.append([mask_id, ver_count, ver_count + pol_nver])
                    # v_idx (was `j`) - renamed to avoid shadowing the sample loop
                    for v_idx in range(in_ver_start_idx, in_ver_end_idx):
                        expected_vertices_list.append(in_vertices[v_idx])
                    ver_count = ver_count + pol_nver
            expected_sel_polygons = np.array(expected_polygons_list)
            np.testing.assert_equal(expected_sel_polygons, sel_polygons)
            # Checking the selected vertices correspond to the selected masks
            expected_sel_vertices = np.array(expected_vertices_list)
            np.testing.assert_equal(expected_sel_vertices, sel_vertices)
            # Check that the vertices are correctly mapped to the cropping window
            expected_out_vertices = np.copy(expected_sel_vertices)
            crop_x, crop_y = slice_anchor
            crop_w, crop_h = slice_shape
            for v in range(expected_out_vertices.shape[0]):
                expected_out_vertices[v, 0] = (expected_out_vertices[v, 0] - crop_x) / crop_w
                expected_out_vertices[v, 1] = (expected_out_vertices[v, 1] - crop_y) / crop_h
            np.testing.assert_allclose(expected_out_vertices, out_vertices, rtol=1e-4)
            # Checking the conversion to absolute coordinates
            h, w, _ = image_shape
            wh = np.array([w, h])
            whwh = np.array([w, h, w, h])
            expected_out_vertices_abs = expected_out_vertices * wh
            np.testing.assert_allclose(expected_out_vertices_abs, out_vertices_abs, rtol=1e-4)
            # Checking clamping of the relative coordinates
            expected_out_vertices_clamped = np.clip(expected_out_vertices, a_min=0.0, a_max=1.0)
            np.testing.assert_allclose(expected_out_vertices_clamped,
                                       out_vertices_clamped,
                                       rtol=1e-4)
            # Checking clamping of the absolute coordinates
            expected_out_vertices_clamped_abs = np.clip(expected_out_vertices_abs, 0, wh)
            np.testing.assert_allclose(expected_out_vertices_clamped_abs,
                                       out_vertices_clamped_abs,
                                       rtol=1e-4)
            # Checking scaling of the bounding boxes
            expected_in_bboxes_abs = in_bboxes * whwh
            np.testing.assert_allclose(expected_in_bboxes_abs, in_bboxes_abs, rtol=1e-4)
            # Check box selection and mapping to the cropping window
            expected_out_bboxes = np.copy(in_bboxes[bbox_indices, :])
            for k in range(expected_out_bboxes.shape[0]):
                expected_out_bboxes[k, 0] = (expected_out_bboxes[k, 0] - crop_x) / crop_w
                expected_out_bboxes[k, 1] = (expected_out_bboxes[k, 1] - crop_y) / crop_h
                expected_out_bboxes[k, 2] = (expected_out_bboxes[k, 2] - crop_x) / crop_w
                expected_out_bboxes[k, 3] = (expected_out_bboxes[k, 3] - crop_y) / crop_h
            expected_out_bboxes = np.clip(expected_out_bboxes, a_min=0.0, a_max=1.0)
            np.testing.assert_allclose(expected_out_bboxes, out_bboxes, rtol=1e-4)
            expected_out_bboxes_abs = expected_out_bboxes * whwh
            np.testing.assert_allclose(expected_out_bboxes_abs, out_bboxes_abs, rtol=1e-4)
def test_bbox_random_crop_adjust_polygons():
    """Run the bbox-crop/polygon-adjustment check on the COCO sample DB."""
    coco_root = os.path.join(test_data_root, 'db', 'coco')
    file_root = os.path.join(coco_root, 'images')
    train_annotations = os.path.join(coco_root, 'instances.json')
    check_bbox_random_crop_adjust_polygons(file_root, train_annotations,
                                           batch_size=3, num_iters=4)
|
DALI-main
|
dali/test/python/test_pipeline_segmentation.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import numpy as np
import nvidia.dali.fn as fn
import torch
from nvidia.dali import pipeline_def
from nvidia.dali import types
from nvidia.dali.backend import TensorListGPU
# Large (4M-element) samples and a small batch: per the docstring of
# _test_copy_to_external below, these sizes control the timing that makes
# the missing-synchronization failure reproducible - do not change.
shape = [4000000]
batch_size = 2
@pipeline_def(batch_size=batch_size, device_id=0, num_threads=8)
def _test_pipe():
    # Each sample is an arange starting at its index in the epoch, so every
    # iteration produces different, easily verifiable data.
    def get_tensor(si):
        return np.arange(si.idx_in_epoch, si.idx_in_epoch + shape[0], dtype=np.int32)
    inp = fn.external_source(get_tensor, batch=False)
    return inp.gpu()
# Mapping from DALI element types to the corresponding torch dtypes.
to_torch_type = {
    types.DALIDataType.FLOAT: torch.float32,
    types.DALIDataType.FLOAT64: torch.float64,
    types.DALIDataType.FLOAT16: torch.float16,
    types.DALIDataType.UINT8: torch.uint8,
    types.DALIDataType.INT8: torch.int8,
    types.DALIDataType.INT16: torch.int16,
    types.DALIDataType.INT32: torch.int32,
    types.DALIDataType.INT64: torch.int64
}
def feed_ndarray(tensor_or_tl, arr, cuda_stream=None, non_blocking=False):
    """
    Copy contents of DALI tensor to PyTorch's Tensor.

    Parameters
    ----------
    `tensor_or_tl` : TensorGPU or TensorListGPU
        Source of the copy.
    `arr` : torch.Tensor
        Destination of the copy.
    `cuda_stream` : torch.cuda.Stream, cudaStream_t or any value that can be cast to cudaStream_t.
        CUDA stream to be used for the copy
        (if not provided, an internal user stream will be selected)
        In most cases, using pytorch's current stream is expected (for example,
        if we are copying to a tensor allocated with torch.zeros(...))
    `non_blocking` : bool
        If True, the copy is only scheduled on `cuda_stream` and the call
        returns without waiting for it to finish.
    """
    # Torch dtype matching the DALI element type (the local was previously
    # misleadingly named `dali_type`).
    torch_dtype = to_torch_type[tensor_or_tl.dtype]
    if isinstance(tensor_or_tl, TensorListGPU):
        dali_tensor = tensor_or_tl.as_tensor()
    else:
        dali_tensor = tensor_or_tl
    assert torch_dtype == arr.dtype, (f"The element type of DALI Tensor/TensorList "
                                      f"doesn't match the element type of the target PyTorch Tensor: "
                                      f"{torch_dtype} vs {arr.dtype}")
    assert dali_tensor.shape() == list(arr.size()), (
        f"Shapes do not match: DALI tensor has size {dali_tensor.shape()}, "
        f"but PyTorch Tensor has size {list(arr.size())}")
    cuda_stream = types._raw_cuda_stream(cuda_stream)
    # turn raw int to a c void pointer
    c_type_pointer = ctypes.c_void_p(arr.data_ptr())
    stream = None if cuda_stream is None else ctypes.c_void_p(cuda_stream)
    tensor_or_tl.copy_to_external(c_type_pointer, stream, non_blocking)
    return arr
def _test_copy_to_external(use_tensor_list, non_blocking):
    """Test whether the copy_to_external is properly synchronized before the
    output tensor is recycled.

    copy_to_external can work in a non-blocking mode - in this mode, the data is
    copied on a user-provided stream, but the host thread doesn't block until
    the copy finishes. However, to ensure that a tensor has been consumed before
    allowing its reuse, a synchronization is scheduled on the stream associated
    with the tensor being copied.

    WARNING:
    This test is crafted so that it fails when the synchronization doesn't occur.
    The timing is controlled by data sizes and number of iterations - do not
    change these values!

    Fix: ``ref_tensor`` previously ignored its ``sample_shape`` parameter and
    reshaped with the global ``shape`` instead; it now uses the parameter.
    Both are equal at every call site, so the timing-critical sizes are
    untouched.
    """
    def ref_tensor(batch_size, sample_shape, start_value):
        # One arange sample, then a stacked batch of consecutive offsets.
        volume = np.prod(sample_shape)
        sample0 = torch.arange(start_value, start_value + volume, dtype=torch.int32,
                               device="cuda:0").reshape(sample_shape)
        return torch.stack([sample0 + i for i in range(batch_size)])

    def check(arr, ref):
        return torch.equal(arr, ref)

    # get a Pytorch CUDA stream
    stream = torch.cuda.Stream(device=0)
    with torch.cuda.stream(stream):
        # allocate an empty tensor into which the pipeline output will be copied
        arr = torch.empty([batch_size] + shape, dtype=torch.int32, device="cuda:0")
        # create a reference tensor...
        ref = ref_tensor(batch_size, shape, 0)
        # ...and tensors which will be used to hog the GPU
        hog = [ref_tensor(batch_size, shape, i * batch_size) for i in range(20)]
        # try 10 times
        for i in range(10):
            # create a fresh pipeline
            pipe = _test_pipe(prefetch_queue_depth=2)
            pipe.build()
            # schedule some runs ahead, so we know that the execution
            # of the next iteration starts immediately
            pipe.schedule_run()
            pipe.schedule_run()
            out, = pipe.share_outputs()
            # do something time-consuming on the torch stream to give DALI time
            # to clobber the buffer
            hog = [torch.sqrt(x) for x in hog]
            # copy the result asynchronously
            copy_source = out if use_tensor_list else out.as_tensor()
            feed_ndarray(copy_source, arr, stream.cuda_stream, non_blocking)
            pipe.release_outputs()
            # drain
            _, = pipe.share_outputs()
            pipe.release_outputs()
            # if no appropriate synchronization is done, the array is likely
            # clobbered with the results from the second iteration
            assert check(arr, ref)
            # free resources to prevent OOM in the next iteration
            del pipe
def test_copy_to_external():
    """Yield all (tensor-list?, non-blocking?) combinations of the sync test."""
    flags = (False, True)
    for use_tl in flags:
        for non_blocking in flags:
            yield _test_copy_to_external, use_tl, non_blocking
|
DALI-main
|
dali/test/python/test_copy_to_external_torch.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
from nvidia.dali import Pipeline, pipeline_def
import nvidia.dali.plugin.tf as dali_tf
from nvidia.dali.plugin.tf.experimental import Input
from nvidia.dali import fn
from nose.tools import with_setup
from test_dali_tf_dataset_pipelines import \
FixedSampleIterator, RandomSampleIterator, external_source_converter_multiple, \
external_source_converter_with_callback, external_source_converter_with_fixed_value, \
external_source_tester, external_source_tester_multiple, get_min_shape_helper, \
many_input_pipeline
from test_dali_tf_es_pipelines import \
external_source_to_tf_dataset, gen_tf_with_dali_external_source, get_external_source_pipe
from test_utils_tensorflow import \
get_dali_dataset_from_pipeline, get_image_pipeline, get_mix_size_image_pipeline, \
run_dataset_eager_mode, run_tf_dataset_eager_mode, \
run_tf_dataset_multigpu_eager_manual_placement, \
run_tf_dataset_multigpu_eager_mirrored_strategy, \
skip_for_incompatible_tf, skip_inputs_for_incompatible_tf
from nose_utils import raises
import random as random
import itertools
tf.compat.v1.enable_eager_execution()
def test_tf_dataset_gpu():
    """Smoke-test DALIDataset placed on the GPU in eager mode."""
    device = 'gpu'
    run_tf_dataset_eager_mode(device)
def test_tf_dataset_cpu():
    """Smoke-test DALIDataset placed on the CPU in eager mode."""
    device = 'cpu'
    run_tf_dataset_eager_mode(device)
# Return differently sized images to check if DALIDataset can handle this case gracefully
@raises(tf.errors.FailedPreconditionError,
        glob='Batch output at index * from DALI pipeline is not uniform')
def test_mixed_size_pipeline():
    """A pipeline emitting non-uniform batches must fail with a clear error."""
    pipeline_getter = get_mix_size_image_pipeline
    run_tf_dataset_eager_mode('gpu', get_pipeline_desc=pipeline_getter)
def run_tf_dataset_with_constant_input(dev, shape, value, dtype, batch):
    """Feed a constant tensor through an external source and through tf.data."""
    tensor = np.full(shape, value, dtype)
    run_tf_dataset_eager_mode(
        dev,
        get_pipeline_desc=external_source_tester(
            shape, dtype, FixedSampleIterator(tensor), batch=batch),
        to_dataset=external_source_converter_with_fixed_value(shape, dtype, tensor, batch))
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_with_constant_input():
    """Constant-input cases across devices, shapes, dtypes and batch modes."""
    devices = ['cpu', 'gpu']
    shapes = [(7, 42), (64, 64, 3), (3, 40, 40, 4)]
    dtypes = [np.uint8, np.int32, np.float32]
    batch_modes = ["dataset", True, False, None]
    for dev, shape, dtype, batch in itertools.product(devices, shapes, dtypes, batch_modes):
        value = random.choice([42, 255])
        yield run_tf_dataset_with_constant_input, dev, shape, value, dtype, batch
def run_tf_dataset_with_random_input(dev, max_shape, dtype, batch="dataset"):
    """Feed randomly shaped/valued samples through ES and through tf.data."""
    min_shape = get_min_shape_helper(batch, max_shape)
    source_iter = RandomSampleIterator(max_shape, dtype(0), min_shape=min_shape)
    run_tf_dataset_eager_mode(
        dev,
        get_pipeline_desc=external_source_tester(max_shape, dtype, source_iter, batch=batch),
        to_dataset=external_source_converter_with_callback(
            RandomSampleIterator, max_shape, dtype, 0, 1e10, min_shape, batch=batch))
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_with_random_input():
    """Random-input cases across devices, shapes, dtypes and batch modes."""
    for dev, max_shape, dtype, batch in itertools.product(
            ['cpu', 'gpu'],
            [(10, 20), (120, 120, 3), (3, 40, 40, 4)],
            [np.uint8, np.int32, np.float32],
            ["dataset", False, True, None]):
        yield run_tf_dataset_with_random_input, dev, max_shape, dtype, batch
# Run with everything on GPU (External Source op as well)
def run_tf_dataset_with_random_input_gpu(max_shape, dtype, batch):
    """Like run_tf_dataset_with_random_input, but the external source is on GPU too."""
    min_shape = get_min_shape_helper(batch, max_shape)
    source_iter = RandomSampleIterator(max_shape, dtype(0), min_shape=min_shape)
    run_tf_dataset_eager_mode(
        "gpu",
        get_pipeline_desc=external_source_tester(max_shape, dtype, source_iter, "gpu",
                                                 batch=batch),
        to_dataset=external_source_converter_with_callback(
            RandomSampleIterator, max_shape, dtype, 0, 1e10, min_shape, batch=batch))
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_with_random_input_gpu():
    """GPU-everything variants of the random-input cases."""
    for max_shape, dtype, batch in itertools.product(
            [(10, 20), (120, 120, 3), (3, 40, 40, 4)],
            [np.uint8, np.int32, np.float32],
            ["dataset", False, True, None]):
        yield run_tf_dataset_with_random_input_gpu, max_shape, dtype, batch
def run_tf_dataset_no_copy(max_shape, dtype, dataset_dev, es_dev, no_copy):
    """Run a dataset with an explicit no_copy setting on the external source."""
    pipeline_desc = external_source_tester(
        max_shape, dtype, RandomSampleIterator(max_shape, dtype(0)), es_dev, no_copy)
    dataset_converter = external_source_converter_with_callback(
        RandomSampleIterator, max_shape, dtype)
    run_tf_dataset_eager_mode(dataset_dev,
                              get_pipeline_desc=pipeline_desc,
                              to_dataset=dataset_converter)
# Check if setting no_copy flags in all placement scenarios is ok as we override it internally
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_with_no_copy():
    """no_copy must be accepted (and internally overridden) in every valid placement."""
    for max_shape in [(10, 20), (120, 120, 3)]:
        for dataset_dev, es_dev in itertools.product(["cpu", "gpu"], repeat=2):
            if dataset_dev == "cpu" and es_dev == "gpu":
                continue  # GPU op in CPU dataset not supported
            for no_copy in [True, False, None]:
                yield run_tf_dataset_no_copy, max_shape, np.uint8, dataset_dev, es_dev, no_copy
def run_tf_dataset_with_stop_iter(dev, max_shape, dtype, stop_samples):
    """Verify the dataset stops cleanly when the source raises StopIteration."""
    source_iter = RandomSampleIterator(max_shape, dtype(0), start=0, stop=stop_samples)
    run_tf_dataset_eager_mode(
        dev,
        to_stop_iter=True,
        get_pipeline_desc=external_source_tester(max_shape, dtype, source_iter),
        to_dataset=external_source_converter_with_callback(
            RandomSampleIterator, max_shape, dtype, 0, stop_samples))
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_with_stop_iter():
    """Stop-iteration cases: the source runs out 3 samples short of a full batch."""
    batch_size = 12
    for dev, max_shape, dtype, iters in itertools.product(
            ['cpu', 'gpu'],
            [(10, 20), (120, 120, 3), (3, 40, 40, 4)],
            [np.uint8, np.int32, np.float32],
            [1, 2, 3, 4, 5]):
        yield run_tf_dataset_with_stop_iter, dev, max_shape, dtype, iters * batch_size - 3
def run_tf_dataset_multi_input(dev, start_values, input_names, batches):
    """Run a pipeline that consumes several external-source inputs at once."""
    pipeline_desc = external_source_tester_multiple(start_values, input_names, batches)
    dataset_converter = external_source_converter_multiple(start_values, input_names, batches)
    run_tf_dataset_eager_mode(dev,
                              get_pipeline_desc=pipeline_desc,
                              to_dataset=dataset_converter)
# Per-case lists of initial tensors for the multi-input tests: cases with
# two, one and three inputs of various dtypes and shapes.
start_values = [[np.full((2, 4), 42, dtype=np.int64),
                 np.full((3, 5), 123.0, dtype=np.float32)],
                [np.full((3, 5), 3.14, dtype=np.float32)],
                [
                    np.full((2, 4), 42, dtype=np.int64),
                    np.full((3, 5), 666.0, dtype=np.float32),
                    np.full((1, 7), -5, dtype=np.int8)
                ]]
# Matching external-source node names ("input_0", "input_1", ...) per case.
input_names = [["input_{}".format(i) for i, _ in enumerate(vals)] for vals in start_values]
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_multi_input():
    """Exercise multi-input DALIDataset with every input batch-mode combination.

    Fix: batch-mode lists were sized with ``len(input_names)`` (the number of
    test cases, always 3) instead of ``len(names)`` (the number of inputs of
    the current case), so cases with 1 or 2 inputs got 3-element lists.
    Also dropped the pointless ``list()`` around ``itertools.product``.
    """
    for dev in ['cpu', 'gpu']:
        for starts, names in zip(start_values, input_names):
            yield run_tf_dataset_multi_input, dev, starts, names, ["dataset" for _ in names]
            for batches in itertools.product([True, False], repeat=len(names)):
                yield run_tf_dataset_multi_input, dev, starts, names, batches
@raises(tf.errors.InternalError, glob='TF device and DALI device mismatch')
def test_tf_dataset_wrong_placement_cpu():
    """A CPU pipeline wrapped as a GPU-placed dataset must raise InternalError."""
    batch_size, num_threads = 12, 4
    pipe = get_image_pipeline(batch_size, num_threads, 'cpu', 0)
    with tf.device('/gpu:0'):
        dataset = get_dali_dataset_from_pipeline(pipe, 'gpu', 0)
        for _ in dataset:
            pass
@raises(tf.errors.InternalError, glob='TF device and DALI device mismatch')
def test_tf_dataset_wrong_placement_gpu():
    """A GPU pipeline wrapped as a CPU-placed dataset must raise InternalError."""
    batch_size, num_threads = 12, 4
    pipe = get_image_pipeline(batch_size, num_threads, 'gpu', 0)
    with tf.device('/cpu:0'):
        dataset = get_dali_dataset_from_pipeline(pipe, 'cpu', 0)
        for _ in dataset:
            pass
def check_basic_dataset_build(input_datasets):
    """Build a two-input DALIDatasetWithInputs on the CPU and return it."""
    names = ["a", "b"]
    modes = ["dataset" for _ in names]
    pipe = many_input_pipeline(True, "cpu", None, names, modes,
                               batch_size=8, num_threads=4, device_id=0)
    with tf.device('/cpu:0'):
        return dali_tf.experimental.DALIDatasetWithInputs(
            input_datasets=input_datasets,
            pipeline=pipe,
            batch_size=pipe.max_batch_size,
            output_shapes=(None, None),
            output_dtypes=(tf.int32, tf.int32),
            num_threads=pipe.num_threads,
            device_id=pipe.device_id)
@raises(TypeError,
        glob='`input_datasets` must be a dictionary that maps input names * to input datasets')
def check_tf_dataset_wrong_input_type(wrong_input_datasets):
    """Building with a malformed `input_datasets` argument must raise TypeError."""
    bad_inputs = wrong_input_datasets
    check_basic_dataset_build(bad_inputs)
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_wrong_input_type():
    """Exercise all the malformed `input_datasets` variants."""
    input_dataset = tf.data.Dataset.from_tensors(np.full((2, 2), 42)).repeat()
    # wrong `input_datasets` type (no dictionary)
    for bad in ["a", input_dataset, [input_dataset]]:
        yield check_tf_dataset_wrong_input_type, bad
    # wrong values in dictionary
    for bad in ["str", [input_dataset]]:
        yield check_tf_dataset_wrong_input_type, {"a": bad, "b": bad}
    # wrong keys in dictionary
    for bad_name in [42, ("a", "b")]:
        yield check_tf_dataset_wrong_input_type, {bad_name: input_dataset}
@raises(ValueError,
        glob='Found External Source nodes in the Pipeline, that were not assigned any inputs.')
@with_setup(skip_for_incompatible_tf)
def test_input_not_provided():
    """Leaving one of the two ES placeholders without an input must fail."""
    source = tf.data.Dataset.from_tensors(np.full((2, 2), 42)).repeat()
    check_basic_dataset_build({"a": source})
@raises(ValueError,
        glob='Did not find an External Source placeholder node * in the provided pipeline')
@with_setup(skip_for_incompatible_tf)
def test_missing_es_node():
    """Providing an input for a nonexistent ES node ("c") must fail."""
    source = tf.data.Dataset.from_tensors(np.full((2, 2), 42)).repeat()
    check_basic_dataset_build({name: source for name in ("a", "b", "c")})
@pipeline_def(batch_size=10, num_threads=4, device_id=0)
def es_pipe(kwargs):
    """Pipeline consisting of a single external source configured by `kwargs`."""
    return fn.external_source(**kwargs)
def check_single_es_pipeline(kwargs, input_datasets):
    """Wrap `es_pipe(kwargs)` in a CPU DALIDatasetWithInputs and return it."""
    pipe = es_pipe(kwargs)
    with tf.device('/cpu:0'):
        dataset = dali_tf.experimental.DALIDatasetWithInputs(
            input_datasets=input_datasets,
            pipeline=pipe,
            batch_size=pipe.max_batch_size,
            output_shapes=(None, None),
            output_dtypes=(tf.int32, tf.int32),
            num_threads=pipe.num_threads,
            device_id=pipe.device_id)
    return dataset
@raises(ValueError,
        glob='Did not find an External Source placeholder node * in the provided pipeline')
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_es_with_source():
    """An external source that already has a `source` is not a placeholder."""
    in_dataset = tf.data.Dataset.from_tensors(np.full((2, 2), 42)).repeat()
    check_single_es_pipeline({'name': 'a', 'source': []}, {'a': in_dataset})
@raises(ValueError,
        glob='The parameter ``num_outputs`` is only valid when using ``source`` to provide data.')
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_es_num_outputs_provided():
    """`num_outputs` without a `source` must be rejected by the external source."""
    in_dataset = tf.data.Dataset.from_tensors(np.full((2, 2), 42)).repeat()
    check_single_es_pipeline({'name': 'a', 'num_outputs': 1}, {'a': in_dataset})
@raises(ValueError,
        glob='Found placeholder External Source node * in the Pipeline that was not named')
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_disallowed_es():
    """An unnamed placeholder external source cannot be matched to any input."""
    check_single_es_pipeline({}, {})
def check_layout(kwargs, input_datasets, layout):
    """Verify that data fed through an external source carries `layout`.

    Relies on fn.pad's internal axis-name validation: running fails if
    the external source did not end up with the expected layout.
    """
    pipe = Pipeline(10, 4, 0)
    with pipe:
        # `data` instead of `input` to avoid shadowing the builtin.
        data = fn.external_source(**kwargs)
        pipe.set_outputs(fn.pad(data, axis_names=layout))
    with tf.device('/cpu:0'):
        dali_dataset = dali_tf.experimental.DALIDatasetWithInputs(
            input_datasets=input_datasets,
            pipeline=pipe,
            batch_size=pipe.max_batch_size,
            output_shapes=None,
            output_dtypes=tf.int64,
            num_threads=pipe.num_threads,
            device_id=pipe.device_id)
    run_dataset_eager_mode(dali_dataset, 10)
def run_tf_with_dali_external_source(dev, es_args, ed_dev, dtype, *_):
    """Run an external-source pipeline as a TF dataset until iteration stops."""
    pipeline_desc = get_external_source_pipe(es_args, dtype, ed_dev)
    run_tf_dataset_eager_mode(dev,
                              get_pipeline_desc=pipeline_desc,
                              to_dataset=external_source_to_tf_dataset,
                              to_stop_iter=True)
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_with_dali_external_source():
    """Generate all eager-mode external-source-to-TF cases."""
    yield from gen_tf_with_dali_external_source(run_tf_with_dali_external_source)
@with_setup(skip_inputs_for_incompatible_tf)
def test_tf_dataset_layouts():
    """Layout can come from the external source or from experimental.Input."""
    cases = [((2, 3), "XY"), ((10, 20, 3), "HWC"), ((4, 128, 64, 3), "FHWC")]
    for shape, layout in cases:
        in_dataset = tf.data.Dataset.from_tensors(np.full(shape, 42)).repeat()
        # Captured from pipeline
        yield check_layout, {'layout': layout, 'name': 'in'}, {'in': in_dataset}, layout
        # Captured from pipeline
        yield check_layout, {'layout': layout, 'name': 'in'}, {'in': Input(in_dataset)}, layout
        # Set via experimental.Input, not specified in external source
        yield check_layout, {'name': 'in'}, {'in': Input(in_dataset, layout=layout)}, layout
@raises(TypeError,
        glob='Dataset inputs are allowed only in *DALIDatasetWithInputs')
def test_tf_experimental_inputs_disabled():
    """Plain DALIDataset must reject the experimental `input_datasets` argument."""
    pipeline = get_image_pipeline(4, 4, 'cpu', 0)
    dali_tf.DALIDataset(pipeline,
                        input_datasets={"test": tf.data.Dataset.from_tensors(np.int32([42, 42]))})
@raises(ValueError,
        glob='DALIDataset got a DALI pipeline containing External Source operator nodes')
def test_tf_experimental_source_disabled():
    """Plain DALIDataset must reject pipelines that contain external sources."""
    pipe = Pipeline(10, 4, 0)
    with pipe:
        data = fn.external_source(source=lambda: np.full((4, 4), 0), batch=False)
        pipe.set_outputs(fn.pad(data))
    dali_tf.DALIDataset(pipe, output_dtypes=tf.int32)
# This test should be private (name starts with _) as it is called separately in L1
def _test_tf_dataset_other_gpu():
    """Run the eager-mode dataset test pinned to GPU 1."""
    run_tf_dataset_eager_mode('gpu', 1)
# This test should be private (name starts with _) as it is called separately in L1
def _test_tf_dataset_multigpu_manual_placement():
    """Multi-GPU eager run with manual device placement."""
    run_tf_dataset_multigpu_eager_manual_placement()
# This test should be private (name starts with _) as it is called separately in L1
@with_setup(skip_for_incompatible_tf)
def _test_tf_dataset_multigpu_mirrored_strategy():
    """Multi-GPU eager run under tf.distribute.MirroredStrategy."""
    run_tf_dataset_multigpu_eager_mirrored_strategy()
|
DALI-main
|
dali/test/python/test_dali_tf_dataset_eager.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Used in test_external_source_parallel_custom_serialization to check if modules
# are properly imported during callback deserialization. Such test only makes sense
# if this module is not automatically imported when worker process starts, so don't
# import this file globally
import numpy as np
def cb(x):
    """Return a 10x100 array filled with the sample's index in the epoch."""
    value = x.idx_in_epoch
    return np.full((10, 100), value)
|
DALI-main
|
dali/test/python/import_module_test_helper.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow.compat.v1 as tf_v1
from nose import with_setup
import test_dali_tf_dataset_mnist as mnist
from nose_utils import raises
mnist.tf.compat.v1.disable_eager_execution()
@with_setup(tf.keras.backend.clear_session)
def test_keras_single_gpu():
    """Train the Keras MNIST model on GPU 0."""
    mnist.run_keras_single_device('gpu', 0)
@with_setup(tf.keras.backend.clear_session)
def test_keras_single_other_gpu():
    """Train the Keras MNIST model on a non-default GPU (GPU 1)."""
    mnist.run_keras_single_device('gpu', 1)
@with_setup(tf.keras.backend.clear_session)
def test_keras_single_cpu():
    """Train the Keras MNIST model entirely on the CPU."""
    mnist.run_keras_single_device('cpu', 0)
@raises(Exception, "TF device and DALI device mismatch. TF*: CPU, DALI*: GPU for output")
def test_keras_wrong_placement_gpu():
    """GPU-produced DALI data consumed by a CPU-placed model must be rejected."""
    with tf.device('cpu:0'):
        model = mnist.keras_model()
        train_dataset = mnist.get_dataset('gpu', 0)
        model.fit(train_dataset, epochs=mnist.EPOCHS, steps_per_epoch=mnist.ITERATIONS)
@raises(Exception, "TF device and DALI device mismatch. TF*: GPU, DALI*: CPU for output")
def test_keras_wrong_placement_cpu():
    """CPU-produced DALI data consumed by a GPU-placed model must be rejected."""
    with tf.device('gpu:0'):
        model = mnist.keras_model()
        train_dataset = mnist.get_dataset('cpu', 0)
        model.fit(train_dataset, epochs=mnist.EPOCHS, steps_per_epoch=mnist.ITERATIONS)
@with_setup(tf.compat.v1.reset_default_graph)
def test_graph_single_gpu():
    """Graph-mode MNIST training on GPU 0."""
    mnist.run_graph_single_device('gpu', 0)
@with_setup(tf.compat.v1.reset_default_graph)
def test_graph_single_cpu():
    """Graph-mode MNIST training on the CPU."""
    mnist.run_graph_single_device('cpu', 0)
@with_setup(tf.compat.v1.reset_default_graph)
def test_graph_single_other_gpu():
    """Graph-mode MNIST training on a non-default GPU (GPU 1)."""
    mnist.run_graph_single_device('gpu', 1)
# This function is copied form:
# github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_multi_gpu_train.py#L102
def average_gradients(tower_grads):
    """Average gradients across towers.

    `tower_grads` is a list (one entry per tower) of lists of
    (gradient, variable) pairs. Returns one list of
    (averaged_gradient, variable) pairs. The variable is taken from the
    first tower; the variables are shared across towers, so any tower's
    pointer would do.
    """
    averaged = []
    # zip(*...) regroups the pairs per variable:
    # ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN))
    for pairs in zip(*tower_grads):
        # Stack the per-tower gradients along a new leading 'tower' axis
        # and reduce that axis away with a mean.
        stacked = tf_v1.concat([tf_v1.expand_dims(g, 0) for g, _ in pairs], 0)
        mean_grad = tf_v1.reduce_mean(stacked, 0)
        averaged.append((mean_grad, pairs[0][1]))
    return averaged
@with_setup(tf_v1.reset_default_graph)
def test_graph_multi_gpu():
    """Graph-mode MNIST training replicated over all available GPUs.

    Builds one tower per GPU (sharded DALI dataset, shared model
    variables), averages gradients on the CPU and trains with a single
    apply_gradients step.
    """
    iterator_initializers = []
    with tf.device('/cpu:0'):
        tower_grads = []
        for i in range(mnist.num_available_gpus()):
            with tf.device('/gpu:{}'.format(i)):
                # Shard i of num_available_gpus() shards, placed on GPU i.
                daliset = mnist.get_dataset('gpu', i, i, mnist.num_available_gpus())
                iterator = tf_v1.data.make_initializable_iterator(daliset)
                iterator_initializers.append(iterator.initializer)
                images, labels = iterator.get_next()
                images = tf_v1.reshape(images,
                                       [mnist.BATCH_SIZE, mnist.IMAGE_SIZE * mnist.IMAGE_SIZE])
                labels = tf_v1.reshape(tf_v1.one_hot(labels, mnist.NUM_CLASSES),
                                       [mnist.BATCH_SIZE, mnist.NUM_CLASSES])
                # reuse=(i != 0): the first tower creates the variables,
                # subsequent towers share them.
                logits_train = mnist.graph_model(images, reuse=(i != 0), is_training=True)
                logits_test = mnist.graph_model(images, reuse=True, is_training=False)
                loss_op = tf_v1.reduce_mean(
                    tf_v1.nn.softmax_cross_entropy_with_logits(logits=logits_train,
                                                               labels=labels))
                optimizer = tf_v1.train.AdamOptimizer()
                grads = optimizer.compute_gradients(loss_op)
                # Evaluation accuracy is computed on the first tower only.
                if i == 0:
                    correct_pred = tf_v1.equal(tf_v1.argmax(logits_test, 1),
                                               tf_v1.argmax(labels, 1))
                    accuracy = tf_v1.reduce_mean(
                        tf_v1.cast(correct_pred, tf_v1.float32))
                tower_grads.append(grads)
        tower_grads = average_gradients(tower_grads)
        train_step = optimizer.apply_gradients(tower_grads)
    mnist.train_graph(iterator_initializers, train_step, accuracy)
@with_setup(mnist.clear_checkpoints, mnist.clear_checkpoints)
def test_estimators_single_gpu():
    """Estimator-API MNIST training on GPU 0."""
    mnist.run_estimators_single_device('gpu', 0)
@with_setup(mnist.clear_checkpoints, mnist.clear_checkpoints)
def test_estimators_single_other_gpu():
    """Estimator-API MNIST training on a non-default GPU (GPU 1)."""
    mnist.run_estimators_single_device('gpu', 1)
@with_setup(mnist.clear_checkpoints, mnist.clear_checkpoints)
def test_estimators_single_cpu():
    """Estimator-API MNIST training on the CPU."""
    mnist.run_estimators_single_device('cpu', 0)
|
DALI-main
|
dali/test/python/test_dali_tf_dataset_mnist_graph.py
|
#!/usr/bin/python3
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test_utils import AverageMeter
import time
from test_RN50_external_source_parallel_utils import (
parse_test_arguments, external_source_parallel_pipeline, external_source_pipeline,
file_reader_pipeline, get_pipe_factories)
# This test requires significant amount of shared memory to be able to pass
# the batches between worker processes and the main process. If running in docker
# make sure that -shm-size is big enough.
def iteration_test(args):
    """Benchmark the selected pipeline factories over full epochs.

    For every factory: builds one pipeline per GPU, runs `args.epochs`
    passes (the first pass is a warm-up) of `expected_iters` iterations
    and reports average / worst iteration time and throughput every 50
    iterations.
    """
    test_pipe_factories = get_pipe_factories(
        args.test_pipes, external_source_parallel_pipeline, file_reader_pipeline,
        external_source_pipeline)
    for pipe_factory in test_pipe_factories:
        # TODO(klecki): We don't handle sharding in this test yet, would need to do it manually
        # for External Source pipelines
        pipes = [pipe_factory(
            batch_size=args.batch_size,
            num_threads=args.workers,
            device_id=gpu,
            data_path=args.data_path,
            prefetch_queue_depth=args.prefetch,
            reader_queue_depth=args.reader_queue_depth,
            py_start_method=args.worker_init,
            py_num_workers=args.py_workers,
            source_mode=args.source_mode,
            read_encoded=args.dali_decode,
        ) for gpu in range(args.gpus)]
        # First start the Python workers, so we fork without CUDA context.
        for pipe in pipes:
            pipe.start_py_workers()
        for pipe in pipes:
            pipe.build()
        samples_no = pipes[0].epoch_size("Reader")
        if args.benchmark_iters is None:
            # Round up: a trailing partial batch counts as one iteration.
            expected_iters = samples_no // args.batch_size + (samples_no % args.batch_size != 0)
        else:
            expected_iters = args.benchmark_iters
        print("RUN {}".format(pipe_factory.__name__))
        for i in range(args.epochs):
            if i == 0:
                print("Warm up")
            else:
                print("Test run " + str(i))
            data_time = AverageMeter()
            end = time.time()
            frequency = 50  # report (and sample timing) every `frequency` iterations
            for j in range(expected_iters):
                stop_iter = False
                for pipe in pipes:
                    try:
                        pipe.run()
                    except StopIteration:
                        # Only the very last iteration may run out of data.
                        assert j == expected_iters - 1
                        stop_iter = True
                if stop_iter:
                    break
                if j % frequency == 0 and j != 0:
                    data_time.update((time.time() - end) / frequency)
                    end = time.time()
                    print(f"{pipe_factory.__name__} {j}/ {expected_iters}, "
                          f"avg time: {data_time.avg} [s], worst time: {data_time.max_val} [s], "
                          f"speed: {args.batch_size * args.gpus / data_time.avg} [img/s]")
            for pipe in pipes:
                pipe.reset()
        print("OK {}".format(pipe_factory.__name__))
if __name__ == "__main__":
    # Parse benchmark arguments and run the iteration benchmark.
    iteration_test(parse_test_arguments(False))
|
DALI-main
|
dali/test/python/test_RN50_external_source_parallel_data.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import warnings
from numpy.testing import assert_array_equal
from nvidia.dali import pipeline_def
from nvidia.dali.backend_impl import TensorCPU, TensorListCPU, TensorListGPU
from nvidia.dali.backend_impl import types as types_
import nvidia.dali as dali
from nose_utils import assert_raises
from nose import SkipTest
from test_utils import dali_type_to_np, py_buffer_from_address, get_device_memory_info
def test_preallocation():
    """Preallocating device memory shrinks free memory; releasing restores some."""
    dali.backend.PreallocateDeviceMemory(0, 0)  # initialize the context
    dali.backend.ReleaseUnusedMemory()
    mem_info = get_device_memory_info()
    if mem_info is None:
        raise SkipTest("Python bindings for NVML not found, skipping")
    free_before_prealloc = mem_info.free
    size = 256 << 20
    dali.backend.PreallocateDeviceMemory(size, 0)
    free_after_prealloc = get_device_memory_info().free
    # check that something was allocated
    assert free_after_prealloc < free_before_prealloc
    dali.backend.ReleaseUnusedMemory()
    free_after_release = get_device_memory_info().free
    # check that something was freed
    assert free_after_release > free_after_prealloc
def test_create_tensor():
    """A TensorCPU built from a numpy array round-trips its contents."""
    arr = np.random.rand(3, 5, 6)
    assert_array_equal(arr, np.array(TensorCPU(arr, "NHWC")))
def test_create_tensorlist():
    """A TensorListCPU built from a numpy array round-trips via as_array()."""
    arr = np.random.rand(3, 5, 6)
    assert_array_equal(arr, TensorListCPU(arr, "NHWC").as_array())
def test_create_tensorlist_list():
    """A one-element list of arrays adds a leading sample dimension."""
    arr = np.random.rand(3, 5, 6)
    tensorlist = TensorListCPU([arr], "NHWC")
    assert_array_equal(arr.reshape((1,) + arr.shape), tensorlist.as_array())
def test_create_tensorlist_as_tensor():
    """as_tensor() exposes the same data as as_array()."""
    arr = np.random.rand(3, 5, 6)
    tensorlist = TensorListCPU(arr, "NHWC")
    assert_array_equal(np.array(tensorlist.as_tensor()), tensorlist.as_array())
def test_empty_tensor_tensorlist():
    """Empty input produces empty (0,)-shaped tensor and tensor list."""
    arr = np.array([], dtype=np.float32)
    tensor = TensorCPU(arr, "NHWC")
    tensorlist = TensorListCPU(arr, "NHWC")
    assert_array_equal(np.array(tensor), tensorlist.as_array())
    assert np.array(tensor).shape == (0,)
    assert tensorlist.as_array().shape == (0,)
def test_tensorlist_getitem_cpu():
    """`[]` yields TensorCPU views, `.at()` yields numpy; bounds are enforced."""
    arr = np.random.rand(3, 5, 6)
    tensorlist = TensorListCPU(arr, "NHWC")
    list_of_tensors = list(tensorlist)
    assert type(tensorlist.at(0)) is np.ndarray
    assert type(tensorlist[0]) is not np.ndarray
    assert type(tensorlist[0]) is TensorCPU
    assert type(tensorlist[-3]) is TensorCPU
    assert len(list_of_tensors) == len(tensorlist)
    with assert_raises(IndexError, glob="out of range"):
        tensorlist[len(tensorlist)]
    with assert_raises(IndexError, glob="out of range"):
        tensorlist[-len(tensorlist) - 1]
def test_data_ptr_tensor_cpu():
    """The raw data pointer exposes the same bytes as the source array."""
    arr = np.random.rand(3, 5, 6)
    tensor = TensorCPU(arr, "NHWC")
    viewed = py_buffer_from_address(tensor.data_ptr(), tensor.shape(),
                                    types.to_numpy_type(tensor.dtype))
    assert np.array_equal(arr, viewed)
def test_data_ptr_tensor_list_cpu():
    """A TensorList's data pointer exposes the full contiguous buffer."""
    arr = np.random.rand(3, 5, 6)
    tensorlist = TensorListCPU(arr, "NHWC")
    tensor = tensorlist.as_tensor()
    viewed = py_buffer_from_address(tensorlist.data_ptr(), tensor.shape(),
                                    types.to_numpy_type(tensor.dtype))
    assert np.array_equal(arr, viewed)
def test_array_interface_tensor_cpu():
    """__array_interface__ must agree with data_ptr(), shape() and dtype."""
    arr = np.random.rand(3, 5, 6)
    tensorlist = TensorListCPU(arr, "NHWC")
    iface = tensorlist[0].__array_interface__
    assert iface['data'][0] == tensorlist[0].data_ptr()
    assert not iface['data'][1]  # the read-only flag must not be set
    assert np.array_equal(iface['shape'], tensorlist[0].shape())
    assert np.dtype(iface['typestr']) == np.dtype(
        types.to_numpy_type(tensorlist[0].dtype))
def check_transfer(dali_type):
    """CPU -> GPU -> CPU round trip preserves the data for `dali_type`."""
    arr = np.random.rand(3, 5, 6)
    roundtripped = dali_type(arr)._as_gpu().as_cpu()
    if dali_type is TensorListCPU:
        np.testing.assert_array_equal(arr, roundtripped.as_array())
    else:
        np.testing.assert_array_equal(arr, np.array(roundtripped))
def test_transfer_cpu_gpu():
    """Round-trip both Tensor and TensorList through the GPU."""
    for dali_type in (TensorCPU, TensorListCPU):
        yield check_transfer, dali_type
def check_array_types(t):
    """TensorCPU preserves the values of an array with numpy dtype `t`."""
    arr = np.array([[-0.39, 1.5], [-1.5, 0.33]], dtype=t)
    assert np.allclose(np.array(arr), np.asanyarray(TensorCPU(arr, "NHWC")))
def test_array_interface_types():
    """TensorCPU round-trips all commonly used numpy scalar types.

    Fix: the aliases ``np.float_`` and ``np.long`` were removed in
    NumPy 2.0 and 1.24 respectively; they are spelled via their
    canonical equivalents ``np.float64`` and ``np.int_`` (``np.int_``
    was already in the list, so ``np.long`` is simply dropped).
    """
    for t in [np.bool_, np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64,
              np.uint8, np.uint16, np.uint32, np.uint64, np.float64, np.float32, np.float16,
              np.short, np.longlong, np.ushort, np.ulonglong]:
        yield check_array_types, t
def layout_compatible(a, b):
    """Compare two layout strings, treating None and "" as the same (empty) layout."""
    return (a or "") == (b or "")
# TODO(spanev): figure out which return_value_policy to choose
# def test_tensorlist_getitem_slice():
# arr = np.random.rand(3, 5, 6)
# tensorlist = TensorListCPU(arr, "NHWC")
# two_first_tensors = tensorlist[0:2]
# assert type(two_first_tensors) == tuple
# assert type(two_first_tensors[0]) == TensorCPU
def test_tensor_cpu_squeeze():
    """squeeze() drops size-1 dims (all or a chosen one) and updates the layout."""
    def check_squeeze(shape, dim, in_layout, expected_out_layout):
        arr = np.random.rand(*shape)
        t = TensorCPU(arr, in_layout)
        is_squeezed = t.squeeze(dim)
        # squeeze() reports whether anything was actually removed.
        should_squeeze = len(expected_out_layout) < len(in_layout)
        arr_squeeze = arr.squeeze(dim)
        t_shape = tuple(t.shape())
        assert t_shape == arr_squeeze.shape, f"{t_shape} != {arr_squeeze.shape}"
        assert t.layout() == expected_out_layout, f"{t.layout()} != {expected_out_layout}"
        assert layout_compatible(t.get_property("layout"), expected_out_layout), \
            f'{t.get_property("layout")} doesn\'t match {expected_out_layout}'
        assert np.allclose(arr_squeeze, np.array(t))
        assert is_squeezed == should_squeeze, f"{is_squeezed} != {should_squeeze}"

    cases = [
        (None, (3, 5, 6), "ABC", "ABC"),
        (None, (3, 1, 6), "ABC", "AC"),
        (1, (3, 1, 6), "ABC", "AC"),
        (-2, (3, 1, 6), "ABC", "AC"),
        (None, (1, 1, 6), "ABC", "C"),
        (1, (1, 1, 6), "ABC", "AC"),
        (None, (1, 1, 1), "ABC", ""),
        (None, (1, 5, 1), "ABC", "B"),
        (-1, (1, 5, 1), "ABC", "AB"),
        (0, (1, 5, 1), "ABC", "BC"),
        (None, (3, 5, 1), "ABC", "AB"),
    ]
    for dim, shape, in_layout, expected_out_layout in cases:
        yield check_squeeze, shape, dim, in_layout, expected_out_layout
def test_tensorlist_shape():
    """TensorList shape is per-sample: N copies of the trailing dims."""
    for shape in [(3, 4, 5, 6), (1, 8, 7, 6, 5), (1,), (1, 1)]:
        arr = np.empty(shape)
        tl = TensorListCPU(arr)
        expected = [shape[1:]] * shape[0]
        assert tl.shape() == expected
        assert tl._as_gpu().shape() == expected
def test_tl_from_list_of_tensors_same_shape():
    """A TensorList built from individual tensors matches one built from the array."""
    for shape in [(10, 1), (4, 5, 6), (13, 1), (1, 1)]:
        arr = np.random.rand(*shape)
        tl_cpu_from_np = TensorListCPU(arr)
        tl_cpu_from_tensors = TensorListCPU([TensorCPU(a) for a in arr])
        np.testing.assert_array_equal(tl_cpu_from_np.as_array(),
                                      tl_cpu_from_tensors.as_array())
        tl_gpu_from_np = tl_cpu_from_np._as_gpu()
        tl_gpu_from_tensors = TensorListGPU([TensorCPU(a)._as_gpu() for a in arr])
        np.testing.assert_array_equal(tl_gpu_from_np.as_cpu().as_array(),
                                      tl_gpu_from_tensors.as_cpu().as_array())
def test_tl_from_list_of_tensors_different_shapes():
    """Non-uniform tensors keep their individual shapes inside a TensorList."""
    shapes = [(1, 2, 3), (4, 5, 6), (128, 128, 128), (8, 8, 8), (13, 47, 131)]
    for size in [10, 5, 36, 1]:
        picks = np.random.choice(range(len(shapes)), size=size)
        np_arrays = [np.random.rand(*shapes[i]) for i in picks]
        tl_cpu = TensorListCPU([TensorCPU(a) for a in np_arrays])
        tl_gpu = TensorListGPU([TensorCPU(a)._as_gpu() for a in np_arrays])
        for arr, tensor_cpu, tensor_gpu in zip(np_arrays, tl_cpu, tl_gpu):
            np.testing.assert_array_equal(arr, tensor_cpu)
            np.testing.assert_array_equal(arr, tensor_gpu.as_cpu())
def test_tl_from_list_of_tensors_empty():
    """An empty list is rejected for both backends."""
    for tl_type in (TensorListCPU, TensorListGPU):
        with assert_raises(RuntimeError, glob='Cannot create TensorList from an empty list.'):
            tl_type([])
def test_tl_from_list_of_tensors_different_backends():
    """Mixing CPU and GPU tensors in one TensorList is a TypeError."""
    t_cpu = TensorCPU(np.zeros((1)))
    t_gpu = TensorCPU(np.zeros((1)))._as_gpu()
    with assert_raises(TypeError, glob='Object at position 1 cannot be converted to TensorCPU'):
        TensorListCPU([t_cpu, t_gpu])
    with assert_raises(TypeError, glob='Object at position 1 cannot be converted to TensorGPU'):
        TensorListGPU([t_gpu, t_cpu])
def test_tl_from_list_of_tensors_different_dtypes():
    """Tensors with mismatched dtypes cannot share a TensorList."""
    np_types = [np.float32, np.float16, np.int16, np.int8, np.uint16, np.uint8]
    for dtypes in np.random.choice(np_types, size=(3, 2), replace=False):
        t1 = TensorCPU(np.zeros((1), dtype=dtypes[0]))
        t2 = TensorCPU(np.zeros((1), dtype=dtypes[1]))
        with assert_raises(TypeError,
                           glob="Tensors cannot have different data types. Tensor at position 1 has type '*' expected to have type '*'."):  # noqa: E501
            TensorListCPU([t1, t2])
def test_dtype_deprecation_warning():
    """Calling .dtype() as a method must emit the deprecation warning."""
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        TensorCPU(np.empty((0))).dtype()
        assert "Calling '.dtype()' is deprecated, please use '.dtype' instead" == str(w[-1].message)
def test_dtype_placeholder_equivalence():
    """Tensor dtype matches the DALI type corresponding to its numpy dtype."""
    for dali_type in types._all_types:
        np_type = dali_type_to_np(dali_type)
        assert TensorCPU(np.zeros((1), dtype=np_type)).dtype == dali_type
@pipeline_def(batch_size=8, num_threads=3, device_id=0)
def dtype_pipeline(np_type, placeholder_dali_type):
    """External source of zeros with an explicit placeholder output dtype."""
    return fn.external_source(source=np.zeros((8, 1), dtype=np_type),
                              dtype=placeholder_dali_type)
def test_dtype_converion():
    """Pipeline output dtype matches the requested placeholder dtype."""
    dali_types = [types_._DALIDataType.INT8,
                  types_._DALIDataType.UINT64,
                  types_._DALIDataType.FLOAT16]
    for dali_type in dali_types:
        pipe = dtype_pipeline(dali_type_to_np(dali_type), dali_type)
        pipe.build()
        assert pipe.run()[0].dtype == dali_type
def test_tensorlist_dtype():
    """TensorList dtype matches on both CPU and GPU for every DALI type."""
    for dali_type in types._all_types:
        np_type = dali_type_to_np(dali_type)
        tl = TensorListCPU([TensorCPU(np.zeros((1), dtype=np_type))])
        assert tl.dtype == dali_type
        assert tl._as_gpu().dtype == dali_type
def _expected_tensorlist_str(device, data, dtype, num_samples, shape, layout=None):
return '\n '.join([f'TensorList{device.upper()}(', f'{data},', f'dtype={dtype},'] +
([f'layout={layout}'] if layout is not None else []) +
[f'num_samples={num_samples},', f'shape={shape})'])
def _expected_tensor_str(device, data, dtype, shape, layout=None):
return '\n '.join([f'Tensor{device.upper()}(', f'{data},', f'dtype={dtype},'] +
([f'layout={layout}'] if layout is not None else []) +
[f'shape={shape})'])
def _test_str(tl, expected_params, expected_func):
    """Check CPU and GPU string forms against the expected template."""
    assert str(tl) == expected_func('cpu', *expected_params)
    assert str(tl._as_gpu()) == expected_func('gpu', *expected_params)
def test_tensorlist_str_empty():
    """String form of an empty TensorList."""
    tl = TensorListCPU(np.empty(0))
    _test_str(tl, [[], 'DALIDataType.FLOAT64', 0, []], _expected_tensorlist_str)
def test_tensorlist_str_scalars():
    """String form of a TensorList of scalar samples."""
    arr = np.arange(10)
    tl = TensorListCPU(arr)
    expected_shape = '[(), (), (), (), (), (), (), (), (), ()]'
    _test_str(tl, [arr, 'DALIDataType.INT64', 10, expected_shape], _expected_tensorlist_str)
def test_tensor_str_empty():
    """String form of an empty Tensor."""
    t = TensorCPU(np.empty(0))
    _test_str(t, [[], 'DALIDataType.FLOAT64', [0]], _expected_tensor_str)
def test_tensor_str_sample():
    """String form of a Tensor with actual data."""
    arr = np.arange(16)
    t = TensorCPU(arr)
    _test_str(t, [arr, 'DALIDataType.INT64', [16]], _expected_tensor_str)
def test_tensor_expose_dlpack_capsule():
    """A TensorCPU DLPack capsule can be consumed by numpy's from_dlpack."""
    # TODO(awolant): Numpy versions for Python 3.6 and 3.7 do not
    # support from_dlpack. When we upgrade DLPack support for DALI
    # this test needs to be changed.
    if not hasattr(np, "from_dlpack"):
        raise SkipTest("Test requires Numpy DLPack support.")
    arr = np.arange(20)
    capsule = TensorCPU(arr, "NHWC")._expose_dlpack_capsule()

    # TODO(awolant): This adapter is required due to various implementations
    # for DLPack interface. When we extend DLPack export support this should
    # be removed. It presents the raw capsule through the __dlpack__
    # protocol, which is what np.from_dlpack expects.
    class _CapsuleHolder:
        def __init__(self, c):
            self.capsule = c

        def __dlpack__(self):
            return self.capsule

    assert np.array_equal(arr, np.from_dlpack(_CapsuleHolder(capsule)))
|
DALI-main
|
dali/test/python/test_backend_impl.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nose_utils # noqa:F401 - for Python 3.10
from nvidia.dali.pipeline import pipeline_def
import nvidia.dali.types as types
import nvidia.dali.plugin.tf as dali_tf
from test_utils_tensorflow import skip_for_incompatible_tf
import tensorflow as tf
import random
import numpy as np
@pipeline_def()
def get_dali_pipe(value):
    """Pipeline returning the constant `value` every iteration."""
    return types.Constant(value)
def test_dali_tf_dataset_cpu_only():
    """DALIDataset works with a CPU-only pipeline (CPU_ONLY_DEVICE_ID)."""
    skip_for_incompatible_tf()
    try:
        tf.compat.v1.enable_eager_execution()
    except Exception:
        pass  # best effort - already enabled on TF2
    batch_size = 3
    value = random.randint(0, 1000)
    pipe = get_dali_pipe(batch_size=batch_size,
                         device_id=types.CPU_ONLY_DEVICE_ID,
                         num_threads=1,
                         value=value)
    with tf.device('/cpu'):
        ds = dali_tf.DALIDataset(pipe,
                                 device_id=types.CPU_ONLY_DEVICE_ID,
                                 batch_size=1,
                                 output_dtypes=tf.int32,
                                 output_shapes=[1])
        data = next(iter(ds))
        assert data == np.array([value])
|
DALI-main
|
dali/test/python/test_dali_tf_plugin_cpu_only_dataset.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# it is enough to just import all functions from test_internals_operator_external_source
# nose will query for the methods available and will run them
# the test_internals_operator_external_source is 99% the same for cupy and numpy tests
# so it is better to store everything in one file and just call `use_cupy`
# to switch between the default numpy and cupy
import mxnet as mx
from nose import with_setup
from nose_utils import raises
from test_pool_utils import setup_function
from test_external_source_parallel_utils import (ExtCallback,
check_spawn_with_callback,
create_pipe,
build_and_run_pipeline)
import numpy as np
class ExtCallbackMX(ExtCallback):
    """ExtCallback variant that returns MXNet (CPU) ndarrays."""

    def __call__(self, sample_info):
        data = super().__call__(sample_info)
        return mx.nd.array(data, dtype=data.dtype)
def test_mxnet():
    """Run parallel external-source tests with MXNet CPU outputs."""
    yield from check_spawn_with_callback(ExtCallbackMX)
class ExtCallbackMXCuda(ExtCallback):
    """ExtCallback variant that returns MXNet GPU ndarrays.

    GPU outputs are not supported by parallel external source; this is
    used to verify the error reporting.
    """

    def __call__(self, sample_info):
        data = super().__call__(sample_info)
        return mx.nd.array(data, dtype=data.dtype, ctx=mx.gpu(0))
@raises(Exception, "Exception traceback received from worker thread*"
        "TypeError: Unsupported callback return type. GPU tensors*not supported*"
        "Got*MXNet GPU tensor.")
@with_setup(setup_function)
def test_mxnet_cuda():
    """A GPU tensor returned from a parallel callback must be rejected."""
    callback = ExtCallbackMXCuda((4, 5), 10, np.int32)
    pipe = create_pipe(callback, 'cpu', 5, py_num_workers=6,
                       py_start_method='spawn', parallel=True)
    build_and_run_pipeline(pipe)
|
DALI-main
|
dali/test/python/test_external_source_parallel_mxnet.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nose.tools import with_setup
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
from test_external_source_parallel_utils import setup_function, teardown_function, capture_processes
def large_sample_cb(sample_info):
    """Produce a large (512x1024x1024 int32, ~2 GiB) sample filled with the epoch index."""
    value = sample_info.idx_in_epoch
    return np.full((512, 1024, 1024), value, dtype=np.int32)
@with_setup(setup_function, teardown_function)
def _test_large_sample(start_method):
    """Run a parallel external source that produces ~2 GiB samples.

    Verifies that oversized samples survive the worker-to-main-process
    transfer intact for the given worker `start_method` ("fork"/"spawn").
    """
    batch_size = 2

    @pipeline_def
    def create_pipeline():
        large = fn.external_source(
            large_sample_cb, batch=False, parallel=True, prefetch_queue_depth=1)
        # iteration over array in Python is too slow, so reduce the number of elements
        # to iterate over
        reduced = fn.reductions.sum(large, axes=(1, 2))
        return reduced

    pipe = create_pipeline(batch_size=batch_size, py_num_workers=2, py_start_method=start_method,
                           prefetch_queue_depth=1, num_threads=2, device_id=0)
    pipe.build()
    # Register worker processes so teardown can verify they exit cleanly.
    capture_processes(pipe._py_pool)
    for batch_idx in range(8):
        (out,) = pipe.run()
        for idx_in_batch in range(batch_size):
            idx_in_epoch = batch_size * batch_idx + idx_in_batch
            # Each sample is filled with idx_in_epoch; summing over the two
            # reduced axes multiplies it by 1024 * 1024.
            expected_val = idx_in_epoch * 1024 * 1024
            a = np.array(out[idx_in_batch])
            assert a.shape == (512,), "Expected shape (512,) but got {}".format(a.shape)
            for val in a.flat:
                assert val == expected_val, (
                    f"Unexpected value in batch: got {val}, expected {expected_val}, "
                    f"for batch {batch_idx}, sample {idx_in_batch}")
def test_large_sample():
    """Exercise the large-sample path under both worker start methods."""
    for start_method in ("fork", "spawn"):
        yield _test_large_sample, start_method
|
DALI-main
|
dali/test/python/test_external_source_parallel_large_sample.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali._multiproc.pool import WorkerPool
from nvidia.dali._multiproc.messages import TaskArgs, SampleRange
from contextlib import closing
from nvidia.dali._utils.external_source_impl import get_callback_from_source
from nvidia.dali.types import SampleInfo
from functools import wraps
import numpy as np
import os
from nose.tools import with_setup
from nose_utils import raises
from test_pool_utils import capture_processes, setup_function, teardown_function
def answer(pid, info):
    """Reference result: encode the worker pid and the sample's position
    (epoch index, batch index, iteration) into a 4-element array."""
    fields = [pid, info.idx_in_epoch, info.idx_in_batch, info.iteration]
    return np.array(fields)
def simple_callback(info):
    """Per-sample callback: tag each sample with the pid of the producing worker."""
    return answer(os.getpid(), info)
def another_callback(info):
    """Second, distinguishable callback: simple_callback's result offset by 100."""
    return 100 + simple_callback(info)
class IteratorCb:
    """Stateful batch source: after a reset, the i-th call yields a batch of
    i samples, each sample being [producing pid, call count]."""

    def __init__(self):
        self.count = 0
        self.pid = None

    def __iter__(self):
        # (re)starting iteration records the current pid and rewinds the counter
        self.pid = os.getpid()
        self.count = 0
        return self

    def __next__(self):
        self.count += 1
        return [np.array([self.pid, self.count]) for _ in range(self.count)]
class MockGroup:
    """Minimal stand-in for an external-source group as consumed by WorkerPool."""

    def __init__(self, source_desc, batch, prefetch_queue_depth, bytes_per_sample_hint):
        self.source_desc = source_desc
        self.batch = batch
        self.prefetch_queue_depth = prefetch_queue_depth
        self.bytes_per_sample_hint = bytes_per_sample_hint

    @classmethod
    def from_callback(cls, callback, batch=False, prefetch_queue_depth=1,
                      bytes_per_sample_hint=None):
        """Build a group directly from a raw callback/iterable source."""
        _, desc = get_callback_from_source(callback, cycle=None)
        return cls(desc, batch, prefetch_queue_depth, bytes_per_sample_hint)
def create_pool(groups, keep_alive_queue_size=1, num_workers=1, start_method="fork"):
    """Spawn a WorkerPool for `groups`, register its processes for leak checking,
    and return a closing() context manager guarding the pool."""
    pool = WorkerPool.from_groups(
        groups, keep_alive_queue_size, start_method=start_method, num_workers=num_workers)
    try:
        capture_processes(pool)
    except Exception:
        # make sure worker processes do not outlive a failed setup
        pool.close()
        raise
    return closing(pool)
def get_pids(worker_pool):
    """Return the pool's worker pids; querying them also lets setup_function /
    teardown_function verify the bookkeeping of spawned processes."""
    return worker_pool.pids()
def assert_scheduled_num(context, num_tasks):
    """Assert that exactly `num_tasks` minibatch tasks are pending in `context`
    across all three tracking collections."""
    for tracked in (context.partially_received,
                    context.scheduled_minibatches,
                    context.task_queue):
        assert len(tracked) == num_tasks
start_methods = ["fork", "spawn"]


# Decorator: turn `fn(start_method)` into a nose test generator that yields the
# case once per start method, invoking setup_function/teardown_function around
# each yielded case. Replaces the repeated boilerplate of:
#
#   def check_something(start_method): ...
#
#   @with_setup(setup_function, teardown_function)
#   def test_something():
#       for start_method in start_methods:
#           yield check_something, start_method
def check_pool(fn):
    @wraps(fn)
    def wrapper():
        for method in start_methods:
            setup_function()
            yield fn, method
            teardown_function()
    return wrapper
# ################################################################################################ #
# 1 callback, 1 worker tests
# ################################################################################################ #
@check_pool
def test_pool_one_task(start_method):
    """Smoke test: one 1-sample batch in a 1-worker pool round-trips correctly."""
    groups = [MockGroup.from_callback(simple_callback)]
    with create_pool(groups, keep_alive_queue_size=1, num_workers=1,
                     start_method=start_method) as pool:
        pids = get_pids(pool)
        pid = pids[0]
        # the SampleInfo the worker is expected to receive for the one sample
        tasks = [(SampleInfo(0, 0, 0, 0),)]
        work_batch = TaskArgs.make_sample(SampleRange(0, 1, 0, 0))
        pool.schedule_batch(context_i=0, work_batch=work_batch)
        batch = pool.receive_batch(context_i=0)
        for task, sample in zip(tasks, batch):
            np.testing.assert_array_equal(answer(pid, *task), sample)
@check_pool
def test_pool_multi_task(start_method):
    """A 10-sample batch in a single-worker pool: every result must carry that
    worker's pid and the payload derived from its SampleInfo."""
    groups = [MockGroup.from_callback(simple_callback)]
    with create_pool(groups, keep_alive_queue_size=1, num_workers=1,
                     start_method=start_method) as pool:
        pids = get_pids(pool)
        pid = pids[0]
        tasks = [(SampleInfo(i, i, 0, 0),) for i in range(10)]
        work_batch = TaskArgs.make_sample(SampleRange(0, 10, 0, 0))
        pool.schedule_batch(context_i=0, work_batch=work_batch)
        batch = pool.receive_batch(context_i=0)
        for task, sample in zip(tasks, batch):
            np.testing.assert_array_equal(answer(pid, *task), sample)
# Test that we can safely hold as many results as the keep_alive_queue_size
@check_pool
def test_pool_no_overwrite_batch(start_method):
    """Schedule `depth` batches before receiving any; earlier results must not be
    overwritten while they are still held by the pool."""
    groups = [MockGroup.from_callback(simple_callback, prefetch_queue_depth=0)]
    for depth in [1, 2, 4, 8]:
        with create_pool(groups, keep_alive_queue_size=depth, num_workers=1,
                         start_method=start_method) as pool:
            pids = get_pids(pool)
            pid = pids[0]
            work_batches = [TaskArgs.make_sample(SampleRange(i, i + 1, i, 0)) for i in range(depth)]
            task_list = [[(SampleInfo(i, 0, i, 0),)] for i in range(depth)]
            for i, work_batch in enumerate(work_batches):
                pool.schedule_batch(context_i=0, work_batch=work_batch)
            # all `depth` tasks are in flight before anything has been received
            assert_scheduled_num(pool.contexts[0], depth)
            batches = []
            for i in range(depth):
                batches.append(pool.receive_batch(context_i=0))
                # each receive retires exactly one scheduled task
                assert_scheduled_num(pool.contexts[0], depth - 1 - i)
            tasks_batches = zip(task_list, batches)
            for tasks, batch in tasks_batches:
                for task, sample in zip(tasks, batch):
                    np.testing.assert_array_equal(answer(pid, *task), sample)
# ################################################################################################ #
# 1 callback, multiple workers tests
# ################################################################################################ #
@check_pool
def test_pool_work_split_multiple_tasks(start_method):
    """One 16-sample batch split across 2 workers; the pid column is skipped in
    comparisons because the producing worker of each sample is nondeterministic."""
    callbacks = [MockGroup.from_callback(simple_callback)]
    with create_pool(callbacks, keep_alive_queue_size=1, num_workers=2,
                     start_method=start_method) as pool:
        num_tasks = 16
        pids = get_pids(pool)
        assert len(pids) == 2
        work_batch = TaskArgs.make_sample(SampleRange(0, num_tasks, 0, 0))
        tasks = [(SampleInfo(i, i, 0, 0),) for i in range(num_tasks)]
        pool.schedule_batch(context_i=0, work_batch=work_batch)
        batch = pool.receive_batch(context_i=0)
        for task, sample in zip(tasks, batch):
            # [1:] drops the pid entry - compare only the SampleInfo-derived part
            np.testing.assert_array_equal(answer(-1, *task)[1:], sample[1:])
# ################################################################################################ #
# multiple callbacks
# ################################################################################################ #
@check_pool
def test_pool_iterator_dedicated_worker(start_method):
    """A stateful (iterator) source must be pinned to a single dedicated worker,
    while a stateless callback is distributed across all workers."""
    groups = [
        MockGroup.from_callback(simple_callback, prefetch_queue_depth=3),
        MockGroup.from_callback(IteratorCb(), prefetch_queue_depth=3, batch=True)]
    with create_pool(groups, keep_alive_queue_size=1, num_workers=4,
                     start_method=start_method) as pool:
        pids = get_pids(pool)
        assert len(pids) == 4
        tasks_list = []
        samples_count = 0
        # schedule 4 batches of growing size (1..4 samples) in both contexts
        for i in range(4):
            tasks = [(SampleInfo(samples_count + j, j, i, 0),) for j in range(i + 1)]
            tasks_list.append(tasks)
            work_batch = TaskArgs.make_sample(SampleRange(
                samples_count, samples_count + i + 1, i, 0))
            samples_count += len(tasks)
            pool.schedule_batch(context_i=0, work_batch=work_batch)
            pool.schedule_batch(context_i=1, work_batch=TaskArgs.make_batch((i,)))
        # stateless callback context has no dedicated worker; the iterator does
        assert pool.contexts[0].dedicated_worker_id is None
        iter_worker_num = pool.contexts[1].dedicated_worker_id
        iter_worker_pid = pool.pool._processes[iter_worker_num].pid
        for i in range(4):
            batch_0 = pool.receive_batch(context_i=0)
            batch_1 = pool.receive_batch(context_i=1)
            tasks = tasks_list[i]
            assert len(batch_0) == len(tasks)
            assert len(batch_1) == len(tasks)
            for task, sample in zip(tasks, batch_0):
                # pid column skipped - any worker may have produced the sample
                np.testing.assert_array_equal(answer(-1, *task)[1:], sample[1:])
            for sample in batch_1:
                # every iterator sample must originate from the dedicated worker
                np.testing.assert_array_equal(np.array([iter_worker_pid, i + 1]), sample)
@check_pool
def test_pool_many_ctxs(start_method):
    """Two callback contexts sharing one worker: results must be routed back to
    the matching context (the second callback's output is offset by 100)."""
    callbacks = [simple_callback, another_callback]
    groups = [MockGroup.from_callback(cb) for cb in callbacks]
    with create_pool(groups, keep_alive_queue_size=1, num_workers=1,
                     start_method=start_method) as pool:
        pids = get_pids(pool)
        pid = pids[0]
        tasks = [(SampleInfo(0, 0, 0, 0),)]
        work_batch = TaskArgs.make_sample(SampleRange(0, 1, 0, 0))
        pool.schedule_batch(context_i=0, work_batch=work_batch)
        pool.schedule_batch(context_i=1, work_batch=work_batch)
        batch_0 = pool.receive_batch(context_i=0)
        batch_1 = pool.receive_batch(context_i=1)
        # NOTE(review): the loop variable `pid` shadows the one assigned above;
        # with a single worker both refer to the same process id.
        for task, sample, pid in zip(tasks, batch_0, pids):
            np.testing.assert_array_equal(answer(pid, *task), sample)
        for task, sample, pid in zip(tasks, batch_1, pids):
            np.testing.assert_array_equal(answer(pid, *task) + 100, sample)
@check_pool
def test_pool_context_sync(start_method):
    """`reset()` must drop all scheduled-but-unreceived work in every context and
    resynchronize workers so results scheduled afterwards are unaffected."""
    callbacks = [simple_callback, another_callback]
    groups = [MockGroup.from_callback(cb, prefetch_queue_depth=3) for cb in callbacks]
    with create_pool(groups, keep_alive_queue_size=1, num_workers=4,
                     start_method=start_method) as pool:
        capture_processes(pool)  # NOTE(review): redundant - create_pool already captures
        for i in range(4):
            tasks = [(SampleInfo(j, 0, 0, 0),) for j in range(10 * (i + 1))]
            work_batch = TaskArgs.make_sample(SampleRange(0, 10 * (i + 1), 0, 0))
            pool.schedule_batch(context_i=0, work_batch=work_batch)
            pool.schedule_batch(context_i=1, work_batch=work_batch)
        assert_scheduled_num(pool.contexts[0], 4)
        assert_scheduled_num(pool.contexts[1], 4)
        # pool after a reset should discard all previously scheduled tasks
        # (and sync workers to avoid race on writing to results buffer)
        pool.reset()
        tasks = [(SampleInfo(1000 + j, j, 0, 1),) for j in range(5)]
        work_batch = TaskArgs.make_sample(SampleRange(1000, 1005, 0, 1))
        pool.schedule_batch(context_i=0, work_batch=work_batch)
        pool.schedule_batch(context_i=1, work_batch=work_batch)
        # only the post-reset batch may remain scheduled
        assert_scheduled_num(pool.contexts[0], 1)
        assert_scheduled_num(pool.contexts[1], 1)
        batch_0 = pool.receive_batch(context_i=0)
        batch_1 = pool.receive_batch(context_i=1)
        assert len(batch_0) == len(tasks)
        assert len(batch_1) == len(tasks)
        for task, sample in zip(tasks, batch_0):
            np.testing.assert_array_equal(answer(-1, *task)[1:], sample[1:])
        for task, sample in zip(tasks, batch_1):
            np.testing.assert_array_equal(answer(-1, *task)[1:] + 100, sample[1:])
@with_setup(setup_function, teardown_function)
def _test_multiple_stateful_sources_single_worker(num_workers):
    """Two iterator sources: with one worker they must share the dedicated worker,
    with several workers each must be pinned to a different one."""
    groups = [
        MockGroup.from_callback(IteratorCb(), batch=True),
        MockGroup.from_callback(IteratorCb(), batch=True)]
    with create_pool(groups, keep_alive_queue_size=1, num_workers=num_workers,
                     start_method="spawn") as pool:
        pids = get_pids(pool)
        # at most one worker per stateful source is actually needed
        assert len(pids) == min(num_workers, len(groups))
        pool.schedule_batch(context_i=0, work_batch=TaskArgs.make_batch((0,)))
        pool.schedule_batch(context_i=1, work_batch=TaskArgs.make_batch((0,)))
        iter_worker_num_0 = pool.contexts[0].dedicated_worker_id
        iter_worker_num_1 = pool.contexts[1].dedicated_worker_id
        iter_worker_pid_0 = pool.pool._processes[iter_worker_num_0].pid
        iter_worker_pid_1 = pool.pool._processes[iter_worker_num_1].pid
        batch_0 = pool.receive_batch(context_i=0)
        batch_1 = pool.receive_batch(context_i=1)
        # each sample is [producer pid, call count] - see IteratorCb
        np.testing.assert_array_equal(np.array([iter_worker_pid_0, 1]), batch_0[0])
        np.testing.assert_array_equal(np.array([iter_worker_pid_1, 1]), batch_1[0])
        if num_workers == 1:
            assert iter_worker_pid_0 == iter_worker_pid_1
        else:
            assert iter_worker_pid_0 != iter_worker_pid_1
def test_multiple_stateful_sources_single_worker():
    """Run the dedicated-worker check with one worker and with several."""
    for workers in (1, 4):
        yield _test_multiple_stateful_sources_single_worker, workers
# ################################################################################################ #
# invalid return type
# ################################################################################################ #
def invalid_callback(i):
    """Deliberately returns a plain string - a type the pool must reject."""
    return "42"
@raises(Exception,
        glob="Unsupported callback return type. Expected NumPy array, PyTorch or "
        "MXNet cpu tensors, DALI TensorCPU, or list or tuple of them representing sample. Got")
@with_setup(setup_function, teardown_function)
def test_pool_invalid_return():
    """A callback returning a plain string must fail with a descriptive error."""
    callbacks = [MockGroup.from_callback(invalid_callback)]
    with create_pool(callbacks, keep_alive_queue_size=1, num_workers=1,
                     start_method="spawn") as pool:
        _ = get_pids(pool)
        work_batch = TaskArgs.make_sample(SampleRange(0, 1, 0, 0))
        pool.schedule_batch(context_i=0, work_batch=work_batch)
        # the error surfaces when the result is collected
        pool.receive_batch(context_i=0)
|
DALI-main
|
dali/test/python/test_pool.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
from nvidia.dali import Pipeline
from nose_utils import assert_raises
from test_utils import check_output
# Backend aliases - default to NumPy. use_cupy()/use_torch() below rebind these
# module globals so the same test bodies run against CuPy or PyTorch arrays.
datapy = np
make_array = np.array
random_seed = np.random.seed
random_array = np.random.ranf
random_int = np.random.randint
# to use this it is enough to just import all functions from it by
# `from test_internals_operator_external_source import *`
# nose will query for the methods available and will run them
# the code for CPU and GPU input is 99% the same and the biggest
# difference is between importing numpy or cupy so it is better to store everything in one file
# and just call `use_cupy` to switch between the default numpy and cupy
# True while the active backend produces CPU-resident data
cpu_input = True
def _to_numpy(x):
    # Placeholder: rebound by use_cupy()/use_torch(). Reaching it with the
    # default NumPy backend indicates a test bug (NumPy data never needs it).
    assert False
def cast_to(x, dtype):
    """Convert array `x` to `dtype` (NumPy/CuPy path; use_torch() rebinds this)."""
    converted = x.astype(dtype)
    return converted
def asnumpy(x):
    """Recursively convert backend data to NumPy; None and ndarrays pass through."""
    if x is None:
        return None
    if isinstance(x, np.ndarray):
        return x
    if isinstance(x, list):
        return [asnumpy(item) for item in x]
    return _to_numpy(x)
def use_cupy():
    """Switch the module's backend aliases to CuPy (GPU-resident test data)."""
    global cp
    global datapy
    global make_array
    global _to_numpy
    global random_seed
    global random_array
    global random_int
    # imported lazily so the module loads on machines without CuPy
    import cupy as cp
    datapy = cp
    make_array = cp.array
    _to_numpy = cp.asnumpy
    random_seed = datapy.random.seed
    random_array = datapy.random.ranf
    random_int = datapy.random.randint
    global cpu_input
    # CuPy arrays live on the GPU
    cpu_input = False
def use_torch(gpu):
    """Switch the module's backend aliases to PyTorch tensors (CPU or GPU)."""
    global torch
    global datapy
    global _to_numpy
    global cast_to
    # imported lazily so the module loads on machines without torch
    import torch
    datapy = torch

    def torch2numpy(tensor):
        # .cpu() makes this work for both CPU and GPU tensors
        return np.array(tensor.cpu())

    _to_numpy = torch2numpy
    global random_array

    def make_torch_tensor(*args, **kwargs):
        t = torch.tensor(*args, **kwargs)
        return t.cuda() if gpu else t

    def torch_cast(x, dtype):
        return x.type(dtype)

    cast_to = torch_cast

    def random_array(shape):
        # torch has no `ranf` equivalent; wrap the NumPy one in a tensor
        return make_torch_tensor(np.random.ranf(shape))

    global make_array
    make_array = make_torch_tensor
    global cpu_input
    cpu_input = not gpu
class TestIterator():
    """Deterministic batch source yielding `n` batches of randomly-shaped data.
    `dims` may be an int (single output) or a list (one output per entry);
    `as_tensor` chooses one batch tensor vs. a list of per-sample arrays."""

    def __init__(self, n, batch_size, dims=[2], as_tensor=False):
        self.batch_size = batch_size
        self.dims = dims
        self.n = n
        self.as_tensor = as_tensor
        self.i = 0

    def __len__(self):
        return self.n

    def __iter__(self):
        # return a copy, so that the iteration number doesn't collide
        return TestIterator(self.n, self.batch_size, self.dims, self.as_tensor)

    def __next__(self):
        # reseed per iteration so copies of this iterator replay identical data
        random_seed(12345 * self.i + 4321)

        def generate(dim):
            if self.as_tensor:
                shape = random_int(1, 10, [dim]).tolist()
                return random_array([self.batch_size] + shape)
            else:
                return [random_array(random_int(1, 10, [dim]).tolist()) for _ in
                        range(self.batch_size)]

        if self.i < self.n:
            self.i += 1
            if isinstance(self.dims, (list, tuple)):
                return [generate(d) for d in self.dims]
            else:
                return generate(self.dims)
        else:
            self.i = 0
            raise StopIteration
    next = __next__
class SampleIterator():
    """Flattens a batch iterator into an iterator over individual samples.
    With is_multioutput=True each batch is a tuple of per-output lists and
    each yielded sample is a tuple with one entry per output."""

    def __init__(self, batch_iterator, is_multioutput=False):
        self.src = batch_iterator
        self.is_multioutput = is_multioutput
        self.batch = ([],) if is_multioutput else []
        self.idx = 0

    def __iter__(self):
        return SampleIterator(iter(self.src), self.is_multioutput)

    def __next__(self):
        current = self.batch[0] if self.is_multioutput else self.batch
        if self.idx >= len(current):
            # current batch exhausted - pull the next one (StopIteration propagates)
            self.idx = 0
            self.batch = next(self.src)
        if self.is_multioutput:
            sample = tuple(component[self.idx] for component in self.batch)
        else:
            sample = self.batch[self.idx]
        self.idx += 1
        return sample

    next = __next__
def run_and_check(pipe, ref_iterable):
    """Run `pipe` until the reference data is exhausted, checking each batch
    against the reference; verifies the pipeline produced every batch."""
    reference = iter(ref_iterable)
    completed = 0
    while True:
        try:
            pipe_out = pipe.run()
            expected = asnumpy(next(reference))
            check_output(pipe_out, expected)
            completed += 1
        except StopIteration:
            break
    assert completed == len(ref_iterable)
def _test_iter_setup(use_fn_api, by_name, device):
    """Feed two external sources from iter_setup(), either by name or by the
    DataNode reference, through both the fn and ops APIs."""
    batch_size = 7

    class IterSetupPipeline(Pipeline):
        def __init__(self, iterator, num_threads, device_id, device):
            super(IterSetupPipeline, self).__init__(
                batch_size=iterator.batch_size,
                num_threads=num_threads,
                device_id=device_id)
            self.iterator = iterator
            self._device = device

        def define_graph(self):
            if use_fn_api:
                self.batch_1 = fn.external_source(device=self._device, name="src1")
                self.batch_2 = fn.external_source(device=self._device, name="src2")
            else:
                input_1 = ops.ExternalSource(device=self._device)
                input_2 = ops.ExternalSource(device=self._device)
                self.batch_1 = input_1(name="src1")
                self.batch_2 = input_2(name="src2")
            return [self.batch_1, self.batch_2]

        def iter_setup(self):
            batch_1, batch_2 = next(self.iterator)
            if by_name:
                self.feed_input("src1", batch_1)
                self.feed_input("src2", batch_2)
            else:
                self.feed_input(self.batch_1, batch_1)
                self.feed_input(self.batch_2, batch_2)

    iter_num = 5
    # two outputs: 2D and 3D random data
    source = TestIterator(iter_num, batch_size, [2, 3])
    pipe = IterSetupPipeline(iter(source), 3, 0, device)
    pipe.build()
    run_and_check(pipe, source)
def test_iter_setup():
    """All combinations of API flavor, feed-by-name and device."""
    for use_fn_api in (False, True):
        for by_name in (False, True):
            for device in ("cpu", "gpu"):
                yield _test_iter_setup, use_fn_api, by_name, device
def _test_external_source_callback(use_fn_api, batch, as_tensor, device):
    """External source driven by a callback, in batch or per-sample mode."""
    iter_num = 5
    batch_size = 9
    pipe = Pipeline(batch_size, 3, 0)
    # this should produce a single Tensor / TensorList per batch,
    # not wrapped in additional list
    # NOTE(review): `as_tensor` is unused here; tensor-vs-list is keyed off the device
    source = TestIterator(iter_num, batch_size, 4, device == "gpu")
    iter_in = iter(source) if batch else iter(SampleIterator(iter(source)))
    if use_fn_api:
        input = fn.external_source(lambda: next(iter_in), device=device, batch=batch)
    else:
        ext_source = ops.ExternalSource(lambda: next(iter_in), device=device, batch=batch)
        input = ext_source()
    pipe.set_outputs(input)
    pipe.build()
    run_and_check(pipe, source)
def test_external_source_callback():
    """Grid over API flavor, device, batch mode and tensor layout."""
    for api in (False, True):
        for dev in ("cpu", "gpu"):
            for batch_mode in (True, False):
                for tensor in (False, True):
                    yield _test_external_source_callback, api, batch_mode, tensor, dev
def _test_external_source_callback_split(use_fn_api, batch, as_tensor, device):
    """Callback-driven external source with num_outputs=2 (multi-output split)."""
    iter_num = 5
    batch_size = 9
    pipe = Pipeline(batch_size, 3, 0)
    # this should produce a two-element list of Tensor(Lists), the first
    # being 2D, the second being 3D (+ batch dimension)
    source = TestIterator(iter_num, batch_size, [2, 3], as_tensor)
    iter_in = iter(source) if batch else iter(SampleIterator(iter(source), True))
    if use_fn_api:
        inputs = fn.external_source(lambda: next(iter_in), 2, device=device, batch=batch)
    else:
        ext_source = ops.ExternalSource(lambda: next(iter_in), num_outputs=2, device=device,
                                        batch=batch)
        inputs = ext_source()
    pipe.set_outputs(*inputs)
    pipe.build()
    run_and_check(pipe, source)
def test_external_source_callback_split():
    """Grid over API flavor, device, batch mode and tensor layout (split outputs)."""
    for api in (False, True):
        for dev in ("cpu", "gpu"):
            for batch_mode in (True, False):
                for tensor in (False, True):
                    yield _test_external_source_callback_split, api, batch_mode, tensor, dev
def _test_external_source_iter(use_fn_api, device):
    """External source fed directly with an iterable object."""
    iter_num = 5
    batch_size = 9
    pipe = Pipeline(batch_size, 3, 0)
    # this should produce a single Tensor / TensorList per batch,
    # not wrapped in additional list
    source = TestIterator(iter_num, batch_size, 4, device == "gpu")
    if use_fn_api:
        input = fn.external_source(source, device=device)
    else:
        ext_source = ops.ExternalSource(source, device=device)
        input = ext_source()
    pipe.set_outputs(input)
    pipe.build()
    run_and_check(pipe, source)
def test_external_source_iter():
    """Iterable-driven external source for both APIs on both devices."""
    for api in (False, True):
        for dev in ("cpu", "gpu"):
            yield _test_external_source_iter, api, dev
def _test_external_source_iter_split(use_fn_api, device):
    """Iterable-driven external source with num_outputs=3 (multi-output split)."""
    iter_num = 5
    batch_size = 9
    pipe = Pipeline(batch_size, 3, 0)
    # this should produce a three-element list of Tensor(Lists), the first
    # being 4D, the second being 2D and the third 3D (+ batch dimension)
    source = TestIterator(iter_num, batch_size, [4, 2, 3], device == "gpu")
    if use_fn_api:
        inputs = fn.external_source(source, 3, device=device)
    else:
        ext_source = ops.ExternalSource(source, num_outputs=3, device=device)
        inputs = ext_source()
    pipe.set_outputs(*inputs)
    pipe.build()
    run_and_check(pipe, source)
def test_external_source_iter_split():
    """Split-output iterable source for both APIs on both devices."""
    for api in (False, True):
        for dev in ("cpu", "gpu"):
            yield _test_external_source_iter_split, api, dev
def test_external_source_collection():
    """A plain list of batches serves as a one-epoch external source."""
    pipe = Pipeline(1, 3, 0)
    batches = [
        [make_array([1.5, 2.5], dtype=datapy.float32)],
        [make_array([-1, 3.5, 4.5], dtype=datapy.float32)]
    ]
    pipe.set_outputs(fn.external_source(batches))
    pipe.build()
    run_and_check(pipe, batches)
def test_external_source_iterate_ndarray():
    """With batch=False a single array is split sample-by-sample into one batch."""
    pipe = Pipeline(4, 3, 0)
    batch = make_array([1.5, 2.5, 2, 3], dtype=datapy.float32)
    pipe.set_outputs(fn.external_source(batch, batch=False))
    pipe.build()
    run_and_check(pipe, [batch])
def test_external_source_collection_cycling():
    """cycle=True replays the source collection on every epoch."""
    pipe = Pipeline(1, 3, 0)
    batches = [
        [make_array([1.5, 2.5], dtype=datapy.float32)],
        [make_array([-1, 3.5, 4.5], dtype=datapy.float32)]
    ]
    pipe.set_outputs(fn.external_source(batches, cycle=True))
    pipe.build()
    # epochs are cycles over the source iterable
    for _ in range(3):
        for batch in batches:
            batch = asnumpy(batch)
            check_output(pipe.run(), batch)
def test_external_source_collection_cycling_raise():
    """cycle="raise" signals the epoch end with StopIteration; reset() restarts
    the sources (checked for both a list source and a generator function)."""
    pipe = Pipeline(1, 3, 0, prefetch_queue_depth=1)
    batches = [
        [make_array([1.5, 2.5], dtype=datapy.float32)],
        [make_array([-1, 3.5, 4.5], dtype=datapy.float32)]
    ]

    def batch_gen():
        for b in batches:
            yield b

    pipe.set_outputs(fn.external_source(batches, cycle="raise"),
                     fn.external_source(batch_gen, cycle="raise"))
    pipe.build()
    # epochs are cycles over the source iterable
    for _ in range(3):
        for batch in batches:
            pipe_out = pipe.run()
            batch = asnumpy(batch)
            # both outputs carry the same data
            batch = batch, batch
            check_output(pipe_out, batch)
        # at the epoch boundary the pipeline must raise, then be resettable
        with assert_raises(StopIteration):
            pipe.run()
        pipe.reset()
def test_external_source_with_iter():
    """A one-argument callback receives the iteration index."""
    for attempt in range(10):
        pipe = Pipeline(1, 3, 0)
        pipe.set_outputs(fn.external_source(
            lambda i: [make_array([attempt * 100 + i * 10 + 1.5], dtype=datapy.float32)]))
        pipe.build()
        for i in range(10):
            check_output(pipe.run(), [np.array([attempt * 100 + i * 10 + 1.5], dtype=np.float32)])
def test_external_source_with_sample_info():
    """batch=False callbacks receive a SampleInfo with mutually consistent
    epoch/iteration/batch indices."""
    batch_size = 7
    for attempt in range(10):
        pipe = Pipeline(batch_size, 3, 0)

        def src(si):
            # idx_in_epoch must follow from the iteration and position in batch
            assert si.idx_in_epoch == batch_size * si.iteration + si.idx_in_batch
            return make_array([attempt * 100 + si.iteration * 10 + si.idx_in_batch + 1.5],
                              dtype=datapy.float32)

        pipe.set_outputs(fn.external_source(src, batch=False))
        pipe.build()
        for i in range(10):
            batch = [np.array([attempt * 100 + i * 10 + s + 1.5], dtype=np.float32)
                     for s in range(batch_size)]
            check_output(pipe.run(), batch)
def test_external_source_generator():
    """A generator object works as a single-epoch source."""
    pipe = Pipeline(1, 3, 0)

    def gen():
        for i in range(5):
            yield [make_array([i + 1.5], dtype=datapy.float32)]

    pipe.set_outputs(fn.external_source(gen()))
    pipe.build()
    for i in range(5):
        check_output(pipe.run(), [np.array([i + 1.5], dtype=np.float32)])
def test_external_source_gen_function_cycle():
    """A generator *function* (not object) can be cycled - it is restarted
    at every epoch boundary."""
    pipe = Pipeline(1, 3, 0)

    def gen():
        for i in range(5):
            yield [make_array([i + 1.5], dtype=datapy.float32)]

    pipe.set_outputs(fn.external_source(gen, cycle=True))
    pipe.build()
    for _ in range(3):
        for i in range(5):
            check_output(pipe.run(), [np.array([i + 1.5], dtype=np.float32)])
def test_external_source_gen_function_partial():
    """functools.partial of a generator function is also restartable/cyclable."""
    pipe = Pipeline(1, 3, 0)

    def gen(base):
        for i in range(5):
            yield [make_array([i + base], dtype=datapy.float32)]

    pipe.set_outputs(fn.external_source(functools.partial(gen, 1.5), cycle=True))
    pipe.build()
    for _ in range(3):
        for i in range(5):
            check_output(pipe.run(), [np.array([i + 1.5], dtype=np.float32)])
def test_external_source_generator_cycle_error():
    """Cycling a generator *object* must be rejected - it cannot be restarted."""
    _ = Pipeline(1, 3, 0)

    def gen():
        for i in range(5):
            yield [make_array([i + 1.5], dtype=datapy.float32)]

    fn.external_source(gen(), cycle=False)  # no cycle - OK
    with assert_raises(TypeError,
                       glob="Cannot cycle through a generator * pass that function instead as `source`."):  # noqa: E501
        fn.external_source(gen(), cycle=True)  # cycle over generator - error expected
def test_external_source():
    """Classic ops.ExternalSource fed from iter_setup() until StopIteration."""
    class TestIterator():
        def __init__(self, n):
            self.n = n

        def __iter__(self):
            self.i = 0
            return self

        def __next__(self):
            batch_1 = []
            batch_2 = []
            if self.i < self.n:
                batch_1.append(datapy.arange(0, 1, dtype=datapy.float32))
                batch_2.append(datapy.arange(0, 1, dtype=datapy.float32))
                self.i += 1
                return batch_1, batch_2
            else:
                self.i = 0
                raise StopIteration
        next = __next__

    class IterSetupPipeline(Pipeline):
        def __init__(self, iterator, num_threads, device_id):
            super().__init__(1, num_threads, device_id)
            self.input_1 = ops.ExternalSource()
            self.input_2 = ops.ExternalSource()
            self.iterator = iterator

        def define_graph(self):
            self.batch_1 = self.input_1()
            self.batch_2 = self.input_2()
            return [self.batch_1, self.batch_2]

        def iter_setup(self):
            # StopIteration from the iterator propagates out of pipe.run()
            batch_1, batch_2 = next(self.iterator)
            self.feed_input(self.batch_1, batch_1)
            self.feed_input(self.batch_2, batch_2)

    iter_num = 5
    iterator = iter(TestIterator(iter_num))
    pipe = IterSetupPipeline(iterator, 3, 0)
    pipe.build()
    i = 0
    while True:
        try:
            pipe.run()
            i += 1
        except StopIteration:
            break
    # the pipeline must have completed exactly one pass over the iterator
    assert iter_num == i
def test_external_source_fail_missing_output():
    """Feeding an external source whose output is not returned from define_graph
    (and therefore was pruned) must raise a RuntimeError."""
    class ExternalSourcePipeline(Pipeline):
        def __init__(self, batch_size, external_s_size, num_threads, device_id):
            super().__init__(batch_size, num_threads, device_id)
            self.input = ops.ExternalSource()
            self.input_2 = ops.ExternalSource()
            self.batch_size_ = batch_size
            self.external_s_size_ = external_s_size

        def define_graph(self):
            self.batch = self.input()
            self.batch_2 = self.input_2()
            # batch_2 is intentionally not returned, so it gets pruned
            return [self.batch]

        def iter_setup(self):
            batch = datapy.zeros([self.external_s_size_, 4, 5])
            self.feed_input(self.batch, batch)
            self.feed_input(self.batch_2, batch)

    batch_size = 3
    pipe = ExternalSourcePipeline(batch_size, batch_size, 3, 0)
    pipe.build()
    assert_raises(RuntimeError, pipe.run,
                  regex=r"Cannot find [\w]+ tensor, it doesn't exists or was pruned as unused one")
def external_data_veri(external_data, batch_size):
    """Feed `external_data` (scalars or one-element lists) as uint8 and verify
    the values round-trip through the pipeline unchanged."""
    class ExternalSourcePipeline(Pipeline):
        def __init__(self, batch_size, external_data, num_threads, device_id):
            super().__init__(batch_size, num_threads, device_id)
            self.input = ops.ExternalSource()
            self.batch_size_ = batch_size
            self.external_data = external_data

        def define_graph(self):
            self.batch = self.input()
            return [self.batch]

        def iter_setup(self):
            batch = []
            for elm in self.external_data:
                batch.append(make_array(elm, dtype=datapy.uint8))
            self.feed_input(self.batch, batch)

    pipe = ExternalSourcePipeline(batch_size, external_data, 3, 0)
    pipe.build()
    for _ in range(10):
        out = pipe.run()
        for i in range(batch_size):
            assert out[0].as_array()[i] == external_data[i]
def test_external_source_scalar_list():
    """Feed per-sample data both as one-element lists and as bare scalars."""
    batch_size = 3
    base = 10
    lists = [[base + i] for i in range(batch_size)]
    scalars = [base + i * 10 for i in range(batch_size)]
    for external_data in (lists, scalars):
        yield external_data_veri, external_data, batch_size
def test_external_source_gpu():
    """Feed a GPU external source either with a list of per-sample arrays or with
    a single batch tensor, then run a GPU crop over it."""
    class ExternalSourcePipeline(Pipeline):
        def __init__(self, batch_size, num_threads, device_id, use_list):
            super().__init__(batch_size, num_threads, device_id)
            self.input = ops.ExternalSource(device="gpu")
            self.crop = ops.Crop(device="gpu", crop_h=32, crop_w=32, crop_pos_x=0.2, crop_pos_y=0.2)
            self.use_list = use_list

        def define_graph(self):
            self.batch = self.input()
            output = self.crop(self.batch)
            return output

        def iter_setup(self):
            # Fixed: consult the pipeline's own flag instead of the bare name
            # `use_list`, which previously resolved to the enclosing loop
            # variable and silently ignored the stored self.use_list.
            if self.use_list:
                batch_data = [cast_to(random_array([100, 100, 3]) * 256, datapy.uint8)
                              for _ in range(self.batch_size)]
            else:
                batch_data = cast_to(random_array([self.batch_size, 100, 100, 3]) * 256,
                                     datapy.uint8)
            self.feed_input(self.batch, batch_data, layout="HWC")

    for batch_size in [1, 10]:
        for use_list in (True, False):
            pipe = ExternalSourcePipeline(batch_size, 3, 0, use_list)
            pipe.build()
            pipe.run()
class TestIteratorZeroCopy():
    """Like TestIterator, but keeps only the last `num_keep_samples` batches
    alive and corrupts (increments in place) older ones - used to validate the
    lifetime requirements of no_copy external sources."""

    def __init__(self, n, batch_size, dims=[2], as_tensor=False, num_keep_samples=2):
        self.batch_size = batch_size
        self.dims = dims
        self.n = n
        self.as_tensor = as_tensor
        self.i = 0
        self.data = []
        self.num_keep_samples = num_keep_samples

    def __len__(self):
        return self.n

    def __iter__(self):
        # return a copy, so that the iteration number doesn't collide
        return TestIteratorZeroCopy(self.n, self.batch_size, self.dims, self.as_tensor,
                                    self.num_keep_samples)

    def __next__(self):
        # reseed per iteration so copies of this iterator replay identical data
        random_seed(12345 * self.i + 4321)

        def generate(dim):
            shape = random_int(1, 10, [dim]).tolist()
            if self.as_tensor:
                return random_array([self.batch_size] + shape)
            else:
                return [random_array(shape) for _ in range(self.batch_size)]

        if self.i < self.n:
            self.i += 1
            if isinstance(self.dims, (list, tuple)):
                data = [generate(d) for d in self.dims]
            else:
                data = generate(self.dims)
            # it needs to keep data alive
            self.data.append(data)

            def add_one(x):
                # increments arrays in place, recursing through nested lists
                if isinstance(x, list):
                    for elm in x:
                        elm = add_one(elm)
                else:
                    x += 1
                return x

            if len(self.data) > self.num_keep_samples:
                tmp = self.data.pop(0)
                # change poped data to make sure it is corrupted
                tmp = add_one(tmp)
            return data
        else:
            self.i = 0
            raise StopIteration
    next = __next__
def _test_iter_setup_zero_copy(use_fn_api, by_name, as_tensor, device, additional_num_keep_samples):
    """no_copy external source: the caller must keep fed data alive while the
    pipeline may still read it; keeping too few batches alive must corrupt the
    results, and CPU<->GPU source/operator mismatches must be rejected."""
    batch_size = 7
    prefetch_queue_depth = 5

    class IterSetupPipeline(Pipeline):
        def __init__(self, iterator, num_threads, device_id, device, prefetch_queue_depth=2):
            super().__init__(
                batch_size=iterator.batch_size,
                num_threads=num_threads,
                device_id=device_id,
                prefetch_queue_depth=prefetch_queue_depth)
            self.iterator = iterator
            self._device = device

        def define_graph(self):
            if use_fn_api:
                self.batch_1 = fn.external_source(device=self._device, name="src1", no_copy=True)
                self.batch_2 = fn.external_source(device=self._device, name="src2", no_copy=True)
            else:
                input_1 = ops.ExternalSource(device=self._device, no_copy=True)
                input_2 = ops.ExternalSource(device=self._device, no_copy=True)
                self.batch_1 = input_1(name="src1")
                self.batch_2 = input_2(name="src2")
            return [self.batch_1, self.batch_2]

        def iter_setup(self):
            batch_1, batch_2 = next(self.iterator)
            if by_name:
                self.feed_input("src1", batch_1)
                self.feed_input("src2", batch_2)
            else:
                self.feed_input(self.batch_1, batch_1)
                self.feed_input(self.batch_2, batch_2)

    iter_num = 10
    # it is enough to keep only ``prefetch_queue_depth`` or ``cpu_queue_depth * gpu_queue_depth``
    # (when they are not equal), but they are equal in this case
    num_keep_samples = prefetch_queue_depth + additional_num_keep_samples
    source = TestIteratorZeroCopy(iter_num, batch_size, [2, 3], as_tensor=as_tensor,
                                  num_keep_samples=num_keep_samples)
    pipe = IterSetupPipeline(iter(source), 3, 0, device, prefetch_queue_depth)
    pipe.build()
    if (device == "cpu" and not cpu_input) or (device == "gpu" and cpu_input):
        # source/operator device mismatch: no_copy cannot cross CPU<->GPU
        input_types = ["CPU", "GPU"]
        if device == "cpu" and not cpu_input:
            input_types.reverse()
        assert_raises(RuntimeError, pipe.run,
                      glob="no_copy is supported only for the same data source device type as "
                      "operator. Received: {} input for {} operator".format(*input_types))
    elif additional_num_keep_samples < 0 and not (
            device == "gpu" and not cpu_input and not as_tensor):
        # for the GPU2GPU non contiguous input DALI makes an internal copy on provided stream so no
        # data needs to be preserved by the user
        # assert_raises doesn't work here for the assertions from the test_utils.py
        if_raised = False
        try:
            # this tests bases on the race condition. Running it 5 times make this race more
            # likely to happen and tests pass in CI under high CPU load
            iterations = 5
            for _ in range(iterations):
                run_and_check(pipe, source)
        except AssertionError:
            if_raised = True
        assert if_raised
    else:
        run_and_check(pipe, source)
def test_iter_setup_zero_copy():
    """Yield test cases for `_test_iter_setup_zero_copy` over all parameter combinations."""
    api_variants = (False, True)      # ops API vs fn API
    name_modes = (False, True)        # feed by output reference vs by name
    tensor_modes = (False, True)      # list of arrays vs single contiguous tensor
    devices = ("cpu", "gpu")
    # make it -4 as -1 sometimes fails due to being close to the limit
    keep_sample_deltas = (-4, 0, 1)
    for use_fn_api in api_variants:
        for by_name in name_modes:
            for as_tensor in tensor_modes:
                for device in devices:
                    for delta in keep_sample_deltas:
                        yield (_test_iter_setup_zero_copy, use_fn_api, by_name,
                               as_tensor, device, delta)
|
DALI-main
|
dali/test/python/test_external_source_impl.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import argparse
import time
data_paths = ["/data/imagenet/train-jpeg"]
class RN50Pipeline(Pipeline):
    """ResNet-50-style input pipeline (file read, mixed-device decode, random
    resized crop, crop-mirror-normalize) used to benchmark framework iterators.

    Args:
        batch_size, num_threads, device_id: standard DALI pipeline parameters.
        num_gpus: total shard count for the file reader.
        data_paths: list whose first element is the dataset root.
        prefetch: prefetch queue depth (int or {'cpu_size', 'gpu_size'} dict).
        fp16: emit float16 output when True, float32 otherwise.
        nhwc: emit NHWC layout when True, NCHW otherwise.
    """

    def __init__(self, batch_size, num_threads, device_id, num_gpus,
                 data_paths, prefetch, fp16, nhwc):
        super(RN50Pipeline, self).__init__(batch_size, num_threads, device_id,
                                           prefetch_queue_depth=prefetch)
        self.input = ops.readers.File(file_root=data_paths[0], shard_id=device_id,
                                      num_shards=num_gpus)
        self.decode_gpu = ops.decoders.Image(device="mixed", output_type=types.RGB)
        self.res = ops.RandomResizedCrop(device="gpu", size=(224, 224))
        # Fix: `types.args.nhwc` was a broken attribute reference (a bad
        # search-and-replace artifact); the intended layout constant is types.NHWC.
        layout = types.NHWC if nhwc else types.NCHW
        out_type = types.FLOAT16 if fp16 else types.FLOAT
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            dtype=out_type,
                                            output_layout=layout,
                                            crop=(224, 224),
                                            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                                            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
        self.coin = ops.random.CoinFlip(probability=0.5)

    def define_graph(self):
        rng = self.coin()  # per-sample mirror flag
        jpegs, labels = self.input(name="Reader")
        images = self.decode_gpu(jpegs)
        images = self.res(images)
        output = self.cmnp(images.gpu(), mirror=rng)
        return (output, labels.gpu())
# Command-line interface for the iterator benchmark script.
parser = argparse.ArgumentParser(
    description='Test RN50 augmentation pipeline with different FW iterators'
)
parser.add_argument('-g', '--gpus', default=1, type=int, metavar='N',
                    help='number of GPUs (default: 1)')
parser.add_argument('-b', '--batch_size', default=13, type=int, metavar='N',
                    help='batch size (default: 13)')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('-j', '--workers', default=3, type=int, metavar='N',
                    help='number of data loading workers (default: 3)')
parser.add_argument('--prefetch', default=2, type=int, metavar='N',
                    help='prefetch queue depth (default: 2)')
parser.add_argument('--separate_queue', action='store_true',
                    help='Use separate queues executor')
parser.add_argument('--cpu_size', default=2, type=int, metavar='N',
                    help='cpu prefetch queue depth (default: 2)')
parser.add_argument('--gpu_size', default=2, type=int, metavar='N',
                    help='gpu prefetch queue depth (default: 2)')
parser.add_argument('--fp16', action='store_true',
                    help='Run fp16 pipeline')
# Fix: help text was mangled by a bad search-and-replace ('args.nhwc' -> 'NHWC').
parser.add_argument('--nhwc', action='store_true',
                    help='Use NHWC data instead of default NCHW')
parser.add_argument('-i', '--iters', default=-1, type=int, metavar='N',
                    help='Number of iterations to run (default: -1 - whole data set)')
parser.add_argument('-e', '--epochs', default=1, type=int, metavar='N',
                    help='Number of epochs to run (default: 1)')
parser.add_argument('--framework', type=str)
args = parser.parse_args()

# Fix: restore the mangled summary text ('loging' -> 'logging',
# 'args.nhwc:' -> 'nhwc:'); the printed values are unchanged.
print(f"Framework: {args.framework}, GPUs: {args.gpus}, batch: {args.batch_size}, "
      f"workers: {args.workers}, prefetch depth: {args.prefetch}, "
      f"logging interval: {args.print_freq}, fp16: {args.fp16}, nhwc: {args.nhwc}")

# PREFETCH is either a single depth or a per-stage dict for the separated executor.
PREFETCH = args.prefetch
if args.separate_queue:
    PREFETCH = {'cpu_size': args.cpu_size, 'gpu_size': args.gpu_size}
class AverageMeter(object):
    """Tracks the latest value, running sum/count/average and the maximum of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0        # most recently recorded value
        self.avg = 0        # running mean
        self.sum = 0        # weighted sum of all values
        self.count = 0      # total weight recorded so far
        self.avg_last_n = 0
        self.max_val = 0    # largest value seen since reset

    def update(self, val, n=1):
        """Record `val`, weighted by `n` occurrences, and refresh the statistics."""
        self.val = val
        if val > self.max_val:
            self.max_val = val
        self.sum += n * val
        self.count += n
        self.avg = self.sum / self.count
def test_fw_iter(IteratorClass, args):
    """Benchmark one framework iterator class over the RN50 pipeline.

    Builds one pipeline per GPU, then either drives them through a TF1 graph
    session (for the TF DALI iterator) or iterates the framework iterator
    directly, printing average / worst batch times every `args.print_freq` steps.
    """
    iterator_name = IteratorClass.__module__ + "." + IteratorClass.__name__
    print("Start testing {}".format(iterator_name))
    sess = None
    daliop = None
    dali_train_iter = None
    images = []
    labels = []
    # One pipeline per GPU; PREFETCH and data_paths are module-level globals.
    pipes = [RN50Pipeline(batch_size=args.batch_size, num_threads=args.workers, device_id=n,
                          num_gpus=args.gpus, data_paths=data_paths, prefetch=PREFETCH,
                          fp16=args.fp16, nhwc=args.nhwc) for n in range(args.gpus)]
    [pipe.build() for pipe in pipes]
    iters = args.iters
    if args.iters < 0:
        # Default: cover the whole data set; convert sample count to
        # per-GPU iteration count, rounding up at each division.
        iters = pipes[0].epoch_size("Reader")
        assert all(pipe.epoch_size("Reader") == iters for pipe in pipes)
        iters_tmp = iters
        iters = iters // args.batch_size
        if iters_tmp != iters * args.batch_size:
            iters += 1
        iters_tmp = iters
        iters = iters // args.gpus
        if iters_tmp != iters * args.gpus:
            iters += 1
    if iterator_name == "nvidia.dali.plugin.tf.DALIIterator":
        # The TF iterator is an op factory: place one DALI op per GPU in the
        # TF graph and run them through a single session.
        daliop = IteratorClass()
        for dev in range(args.gpus):
            with tf.device('/gpu:%i' % dev):
                if args.fp16:
                    out_type = tf.float16
                else:
                    out_type = tf.float32
                image, label = daliop(pipeline=pipes[dev],
                                      shapes=[(args.batch_size, 3, 224, 224), ()],
                                      dtypes=[out_type, tf.int32])
                images.append(image)
                labels.append(label)
        # Cap TF GPU memory so DALI's own allocations still fit.
        gpu_options = GPUOptions(per_process_gpu_memory_fraction=0.5)
        config = ConfigProto(gpu_options=gpu_options)
        sess = Session(config=config)
    end = time.time()
    for i in range(args.epochs):
        # Epoch 0 is a warm-up pass; later epochs are the measured runs.
        if i == 0:
            print("Warm up")
        else:
            print("Test run " + str(i))
        data_time = AverageMeter()
        if iterator_name == "nvidia.dali.plugin.tf.DALIIterator":
            assert sess is not None
            for j in range(iters):
                sess.run([images, labels])
                data_time.update(time.time() - end)
                if j % args.print_freq == 0:
                    speed = args.gpus * args.batch_size / data_time.avg
                    print(f"{iterator_name} {j + 1}/ {iters}, avg time: {data_time.avg} [s], "
                          f"worst time: {data_time.max_val} [s], speed: {speed} [img/s]")
                end = time.time()
        else:
            # Non-TF iterators are plain Python iterables over the pipelines.
            dali_train_iter = IteratorClass(pipes, reader_name="Reader")
            j = 0
            for it in iter(dali_train_iter):
                data_time.update(time.time() - end)
                if j % args.print_freq == 0:
                    speed = args.gpus * args.batch_size / data_time.avg
                    print(f"{iterator_name} {j + 1}/ {iters}, avg time: {data_time.avg} [s], "
                          f"worst time: {data_time.max_val} [s], speed: {speed} [img/s]")
                end = time.time()
                j = j + 1
                if j > iters:
                    break
def import_mxnet():
    """Lazily import and return the MXNet DALI classification iterator class."""
    from nvidia.dali.plugin.mxnet import DALIClassificationIterator
    return DALIClassificationIterator
def import_pytorch():
    """Lazily import and return the PyTorch DALI classification iterator class."""
    from nvidia.dali.plugin.pytorch import DALIClassificationIterator
    return DALIClassificationIterator
def import_paddle():
    """Lazily import and return the Paddle DALI classification iterator class."""
    from nvidia.dali.plugin.paddle import DALIClassificationIterator
    return DALIClassificationIterator
def import_tf():
    """Lazily import the TF DALI iterator and publish TF1-style symbols.

    Binds ``tf``, ``GPUOptions``, ``ConfigProto`` and ``Session`` as module
    globals so ``test_fw_iter`` can build a graph-mode session for the TF path.
    """
    global tf
    global GPUOptions
    global ConfigProto
    global Session
    from nvidia.dali.plugin.tf import DALIIterator as TensorFlowIterator
    import tensorflow as tf
    try:
        # TF 1.13+ exposes the v1 API under compat.v1.
        from tensorflow.compat.v1 import GPUOptions
        from tensorflow.compat.v1 import ConfigProto
        from tensorflow.compat.v1 import Session
    except ImportError:
        # Older TF versions don't have compat.v1 layer
        from tensorflow import GPUOptions
        from tensorflow import ConfigProto
        from tensorflow import Session
    try:
        # Force graph mode on TF 2.x; a NameError here means no compat layer.
        tf.compat.v1.disable_eager_execution()
    except NameError:
        pass
    return TensorFlowIterator
# Framework name -> list of lazy importer callables; importing lazily keeps
# frameworks that are not under test from being loaded at all.
Iterators = {
    "mxnet": [import_mxnet],
    "pytorch": [import_pytorch],
    "tf": [import_tf],
    "paddle": [import_paddle]
}

assert args.framework in Iterators, "Error, framework {} not supported".format(args.framework)

# Run the benchmark for every iterator class registered for the chosen framework.
for imports in Iterators[args.framework]:
    IteratorClass = imports()
    test_fw_iter(IteratorClass, args)
|
DALI-main
|
dali/test/python/test_RN50_data_fw_iterators.py
|
# Copyright (c) 2017-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
from nvidia.dali import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import nvidia.dali.tfrecord as tfrec
import nvidia.dali as dali
from nvidia.dali import pipeline_def
import numpy as np
from numpy.testing import assert_array_equal
import os
import random
from math import floor, ceil
import sys
import warnings
from webdataset_base import generate_temp_index_file as generate_temp_wds_index
from test_utils import (
check_batch, as_array, compare_pipelines,
get_dali_extra_path, RandomDataIterator)
from nose_utils import raises
from nose_utils import assert_raises
from nose.plugins.skip import SkipTest
test_data_root = get_dali_extra_path()
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
caffe_no_label_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
c2lmdb_db_folder = os.path.join(test_data_root, 'db', 'c2lmdb')
c2lmdb_no_label_db_folder = os.path.join(test_data_root, 'db', 'c2lmdb_no_label')
recordio_db_folder = os.path.join(test_data_root, 'db', 'recordio')
tfrecord_db_folder = os.path.join(test_data_root, 'db', 'tfrecord')
jpeg_folder = os.path.join(test_data_root, 'db', 'single', 'jpeg')
coco_image_folder = os.path.join(test_data_root, 'db', 'coco', 'images')
coco_annotation_file = os.path.join(test_data_root, 'db', 'coco', 'instances.json')
test_data_video = os.path.join(test_data_root, 'db', 'optical_flow', 'sintel_trailer')
webdataset_db_folder = os.path.join(test_data_root, 'db', 'webdataset', 'MNIST')
def test_tensor_multiple_uses():
    """Feed one resized batch to both a CPU and a GPU consumer and verify that
    all three pipeline outputs are dense and element-wise identical."""
    batch_size = 128

    class HybridPipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id, num_gpus):
            super(HybridPipe, self).__init__(batch_size,
                                             num_threads,
                                             device_id)
            self.input = ops.readers.Caffe(
                path=caffe_db_folder, shard_id=device_id, num_shards=num_gpus)
            self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
            self.res = ops.Resize(device="cpu", resize_x=224, resize_y=224)
            self.dump_cpu = ops.DumpImage(device="cpu", suffix="cpu")
            self.dump_gpu = ops.DumpImage(device="gpu", suffix="gpu")

        def define_graph(self):
            inputs, labels = self.input(name="Reader")
            images = self.decode(inputs)
            images = self.res(images)
            # The same `images` node is consumed on CPU and (after .gpu()) on GPU.
            images_cpu = self.dump_cpu(images)
            images_gpu = self.dump_gpu(images.gpu())
            return (images, images_cpu, images_gpu)

    pipe = HybridPipe(batch_size=batch_size, num_threads=1, device_id=0, num_gpus=1)
    pipe.build()
    out = pipe.run()
    assert out[0].is_dense_tensor()
    assert out[1].is_dense_tensor()
    assert out[2].is_dense_tensor()
    assert out[0].as_tensor().shape() == out[1].as_tensor().shape()
    assert out[0].as_tensor().shape() == out[2].as_tensor().shape()
    a_raw = out[0]
    a_cpu = out[1]
    a_gpu = out[2].as_cpu()
    # All three outputs must carry identical pixel data for every sample.
    for i in range(batch_size):
        t_raw = a_raw.at(i)
        t_cpu = a_cpu.at(i)
        assert np.sum(np.abs(t_cpu - t_raw)) == 0
        t_cpu = a_cpu.at(i)
        t_gpu = a_gpu.at(i)
        assert np.sum(np.abs(t_cpu - t_gpu)) == 0
def test_multiple_input_sets():
    """Exercise Multiple Input Sets: running BoxEncoder on lists of DataNodes
    must produce the same results as running it on each input individually."""
    batch_size = 32
    file_root = os.path.join(test_data_root, 'db', 'coco', 'images')
    annotations_file = os.path.join(test_data_root, 'db', 'coco', 'instances.json')

    class MISPipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id, num_gpus):
            super(MISPipe, self).__init__(batch_size, num_threads, device_id, num_gpus)
            # Reading COCO dataset
            self.input = ops.readers.COCO(
                file_root=file_root,
                annotations_file=annotations_file,
                shard_id=device_id,
                num_shards=num_gpus,
                ratio=True,
                ltrb=True,
                random_shuffle=False)
            self.decode_cpu = ops.decoders.Image(device="cpu", output_type=types.RGB)
            self.decode_crop = ops.decoders.ImageSlice(device="cpu", output_type=types.RGB)
            # seed=0 keeps both ssd_crop invocations below deterministic
            self.ssd_crop = ops.SSDRandomCrop(device="cpu", num_attempts=1, seed=0)
            default_boxes = [0.0, 0.0, 1.0, 1.0]
            self.box_encoder_cpu = ops.BoxEncoder(device="cpu", criteria=0.5, anchors=default_boxes)

        def define_graph(self):
            # Do separate augmentations
            inputs0, boxes0, labels0 = self.input(name="Reader0")
            image0 = self.decode_cpu(inputs0)
            image_ssd0, boxes_ssd0, labels_ssd0 = self.ssd_crop(image0, boxes0, labels0)
            inputs1, boxes1, labels1 = self.input(name="Reader1")
            image1 = self.decode_cpu(inputs1)
            image_ssd1, boxes_ssd1, labels_ssd1 = self.ssd_crop(image1, boxes1, labels1)
            encoded_boxes0, encoded_labels0 = self.box_encoder_cpu(boxes_ssd0, labels_ssd0)
            encoded_boxes1, encoded_labels1 = self.box_encoder_cpu(boxes_ssd1, labels_ssd1)
            # Pack into Multiple Input Sets and gather multiple output lists
            boxes = [boxes_ssd0, boxes_ssd1]
            labels = [labels_ssd0, labels_ssd1]
            enc_boxes0, enc_labels0 = self.box_encoder_cpu(boxes, labels)
            # Test one list with one DataNode
            enc_boxes1, enc_labels1 = self.box_encoder_cpu(boxes, labels_ssd0)
            # Return everything (only DataNode allowed)
            return (encoded_boxes0, encoded_labels0, encoded_boxes1, encoded_labels1,
                    enc_boxes0[0], enc_labels0[0], enc_boxes0[1], enc_labels0[1],
                    enc_boxes1[0], enc_labels1[0], enc_boxes1[1], enc_labels1[1])

    pipe = MISPipe(batch_size=batch_size, num_threads=1, device_id=0, num_gpus=1)
    pipe.build()
    out = pipe.run()
    # Outputs are interleaved (boxes, labels) pairs; each pair must match the
    # pair two positions later (individual call vs MIS call on the same data).
    for i in range(batch_size):
        for j in range(0, len(out) - 2, 2):
            # All boxes should be the same
            assert np.array_equal(out[j].at(i), out[j + 2].at(i))
            # All labels should be the same
            assert np.array_equal(out[j + 1].at(i), out[j + 3].at(i))
def test_pipeline_separated_exec_setup():
    """Run the multi-consumer pipeline with separated CPU/GPU prefetch queues
    (dict-valued prefetch_queue_depth) and verify output consistency."""
    batch_size = 128

    class HybridPipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id, num_gpus, prefetch_queue_depth):
            super(HybridPipe, self).__init__(batch_size,
                                             num_threads,
                                             device_id,
                                             prefetch_queue_depth=prefetch_queue_depth)
            self.input = ops.readers.Caffe(
                path=caffe_db_folder, shard_id=device_id, num_shards=num_gpus)
            self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
            self.res = ops.Resize(device="cpu", resize_x=224, resize_y=224)
            self.dump_cpu = ops.DumpImage(device="cpu", suffix="cpu")
            self.dump_gpu = ops.DumpImage(device="gpu", suffix="gpu")

        def define_graph(self):
            inputs, labels = self.input(name="Reader")
            images = self.decode(inputs)
            images = self.res(images)
            images_cpu = self.dump_cpu(images)
            images_gpu = self.dump_gpu(images.gpu())
            return (images, images_cpu, images_gpu)

    # Unequal queue sizes select the separated-queue executor.
    pipe = HybridPipe(batch_size=batch_size, num_threads=1, device_id=0, num_gpus=1,
                      prefetch_queue_depth={
                          "cpu_size": 5,
                          "gpu_size": 3
                      })
    pipe.build()
    out = pipe.run()
    assert out[0].is_dense_tensor()
    assert out[1].is_dense_tensor()
    assert out[2].is_dense_tensor()
    assert out[0].as_tensor().shape() == out[1].as_tensor().shape()
    assert out[0].as_tensor().shape() == out[2].as_tensor().shape()
    a_raw = out[0]
    a_cpu = out[1]
    a_gpu = out[2].as_cpu()
    # CPU dump and GPU dump must reproduce the raw resized images exactly.
    for i in range(batch_size):
        t_raw = a_raw.at(i)
        t_cpu = a_cpu.at(i)
        assert np.sum(np.abs(t_cpu - t_raw)) == 0
        t_cpu = a_cpu.at(i)
        t_gpu = a_gpu.at(i)
        assert np.sum(np.abs(t_cpu - t_gpu)) == 0
def test_pipeline_simple_sync_no_prefetch():
    """Run a fully synchronous pipeline (no async executor, no pipelining,
    queue depth 1) for several iterations without error."""
    batch_size = 16
    n_iters = 12

    class SyncPipe(Pipeline):
        def __init__(self, batch_size):
            super(SyncPipe, self).__init__(batch_size,
                                           num_threads=1,
                                           device_id=0, prefetch_queue_depth=1,
                                           exec_async=False, exec_pipelined=False)
            self.input = ops.readers.Caffe(path=caffe_db_folder)
            self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
            self.dump_gpu = ops.DumpImage(device="gpu", suffix="gpu")

        def define_graph(self):
            encoded, labels = self.input(name="Reader")
            decoded = self.decode(encoded)
            dumped = self.dump_gpu(decoded.gpu())
            return (decoded, dumped)

    pipe = SyncPipe(batch_size=batch_size)
    pipe.build()
    for _ in range(n_iters):
        pipe.run()
def test_use_twice():
    """Verify that invoking the same operator instance twice on the same input
    yields two identical outputs."""
    batch_size = 128

    class Pipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id, num_gpus):
            super(Pipe, self).__init__(batch_size, num_threads, device_id)
            self.input = ops.readers.Caffe(
                path=caffe_db_folder, shard_id=device_id, num_shards=num_gpus)
            self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
            self.res = ops.Resize(device="cpu", resize_x=224, resize_y=224)

        def define_graph(self):
            inputs, labels = self.input(name="Reader")
            images = self.decode(inputs)
            # The same Resize instance is deliberately invoked twice.
            images0 = self.res(images)
            images1 = self.res(images)
            return (images0, images1)

    pipe = Pipe(batch_size=batch_size, num_threads=1, device_id=0, num_gpus=1)
    pipe.build()
    out = pipe.run()
    assert out[0].is_dense_tensor()
    assert out[1].is_dense_tensor()
    assert out[0].as_tensor().shape() == out[1].as_tensor().shape()
    for i in range(batch_size):
        # Fix: the original compared out[0] with itself, which is trivially
        # true; the intent is to compare the two Resize outputs.
        assert np.array_equal(out[0].at(i), out[1].at(i))
def test_cropmirrornormalize_layout():
    """Run CropMirrorNormalize with NHWC and NCHW output layouts on the same
    input and verify the outputs are transposes of each other."""
    batch_size = 128

    class HybridPipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id, num_gpus):
            super(HybridPipe, self).__init__(batch_size,
                                             num_threads,
                                             device_id)
            self.input = ops.readers.Caffe(
                path=caffe_db_folder, shard_id=device_id, num_shards=num_gpus)
            self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
            self.cmnp_nhwc = ops.CropMirrorNormalize(device="gpu",
                                                     dtype=types.FLOAT,
                                                     output_layout=types.NHWC,
                                                     crop=(224, 224),
                                                     mean=[128., 128., 128.],
                                                     std=[1., 1., 1.])
            self.cmnp_nchw = ops.CropMirrorNormalize(device="gpu",
                                                     dtype=types.FLOAT,
                                                     output_layout=types.NCHW,
                                                     crop=(224, 224),
                                                     mean=[128., 128., 128.],
                                                     std=[1., 1., 1.])

        def define_graph(self):
            inputs, labels = self.input(name="Reader")
            images = self.decode(inputs)
            output_nhwc = self.cmnp_nhwc(images.gpu())
            output_nchw = self.cmnp_nchw(images.gpu())
            return (output_nchw, output_nhwc)

    pipe = HybridPipe(batch_size=batch_size, num_threads=1, device_id=0, num_gpus=1)
    pipe.build()
    out = pipe.run()
    assert out[0].is_dense_tensor()
    assert out[1].is_dense_tensor()
    shape_nchw = out[0].as_tensor().shape()
    shape_nhwc = out[1].as_tensor().shape()
    assert shape_nchw[0] == shape_nhwc[0]
    a_nchw = out[0].as_cpu()
    a_nhwc = out[1].as_cpu()
    for i in range(batch_size):
        t_nchw = a_nchw.at(i)
        t_nhwc = a_nhwc.at(i)
        assert t_nchw.shape == (3, 224, 224)
        assert t_nhwc.shape == (224, 224, 3)
        # CHW transposed to HWC must equal the NHWC output exactly.
        assert np.sum(np.abs(np.transpose(t_nchw, (1, 2, 0)) - t_nhwc)) == 0
def test_cropmirrornormalize_pad():
    """Check pad_output=True: the channel dimension grows from 3 to 4, the
    first 3 channels match the unpadded output and the 4th is all zeros."""
    batch_size = 128

    class HybridPipe(Pipeline):
        def __init__(self, layout, batch_size, num_threads, device_id, num_gpus):
            super(HybridPipe, self).__init__(batch_size,
                                             num_threads,
                                             device_id)
            self.input = ops.readers.Caffe(
                path=caffe_db_folder, shard_id=device_id, num_shards=num_gpus)
            self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
            self.cmnp_pad = ops.CropMirrorNormalize(device="gpu",
                                                    dtype=types.FLOAT,
                                                    output_layout=layout,
                                                    crop=(224, 224),
                                                    mean=[128., 128., 128.],
                                                    std=[1., 1., 1.],
                                                    pad_output=True)
            self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                                dtype=types.FLOAT,
                                                output_layout=layout,
                                                crop=(224, 224),
                                                mean=[128., 128., 128.],
                                                std=[1., 1., 1.],
                                                pad_output=False)

        def define_graph(self):
            inputs, labels = self.input(name="Reader")
            images = self.decode(inputs)
            output_pad = self.cmnp_pad(images.gpu())
            output = self.cmnp(images.gpu())
            return (output, output_pad)

    # Exercise padding in both output layouts.
    for layout in [types.NCHW, types.NHWC]:
        pipe = HybridPipe(layout, batch_size=batch_size, num_threads=1, device_id=0, num_gpus=1)
        pipe.build()
        out = pipe.run()
        assert out[0].is_dense_tensor()
        assert out[1].is_dense_tensor()
        shape = out[0].as_tensor().shape()
        shape_pad = out[1].as_tensor().shape()
        assert shape[0] == shape_pad[0]
        a = out[0].as_cpu()
        a_pad = out[1].as_cpu()
        for i in range(batch_size):
            t = a.at(i)
            t_pad = a_pad.at(i)
            if layout == types.NCHW:
                # Channel axis comes first: pad adds a zero-filled 4th plane.
                assert t.shape == (3, 224, 224)
                assert t_pad.shape == (4, 224, 224)
                assert np.sum(np.abs(t - t_pad[:3, :, :])) == 0
                assert np.sum(np.abs(t_pad[3, :, :])) == 0
            else:
                # Channel axis comes last: pad adds a zero-filled 4th channel.
                assert t.shape == (224, 224, 3)
                assert t_pad.shape == (224, 224, 4)
                assert np.sum(np.abs(t - t_pad[:, :, :3])) == 0
                assert np.sum(np.abs(t_pad[:, :, 3])) == 0
def test_cropmirrornormalize_multiple_inputs():
    """Feed CropMirrorNormalize a two-element input set and verify it matches
    single-input invocations on the same data, on both CPU and GPU."""
    batch_size = 13

    class HybridPipe(Pipeline):
        def __init__(self, batch_size, num_threads=1, device_id=0, num_gpus=1, device="cpu"):
            super(HybridPipe, self).__init__(batch_size,
                                             num_threads,
                                             device_id)
            self.device = device
            self.input = ops.readers.Caffe(
                path=caffe_db_folder, shard_id=device_id, num_shards=num_gpus)
            self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
            # Second decoder instance produces identical images from the same input.
            self.decode2 = ops.decoders.Image(device="cpu", output_type=types.RGB)
            self.cmnp = ops.CropMirrorNormalize(device=device,
                                                dtype=types.FLOAT,
                                                output_layout=types.NHWC,
                                                crop=(224, 224),
                                                mean=[128., 128., 128.],
                                                std=[1., 1., 1.])

        def define_graph(self):
            inputs, labels = self.input(name="Reader")
            images = self.decode(inputs)
            images2 = self.decode2(inputs)
            images_device = images if self.device == "cpu" else images.gpu()
            images2_device = images2 if self.device == "cpu" else images2.gpu()
            # Multiple Input Set call vs two single-input calls.
            output1, output2 = self.cmnp([images_device, images2_device])
            output3 = self.cmnp([images_device])
            output4 = self.cmnp([images2_device])
            return (output1, output2, output3, output4)

    for device in ["cpu", "gpu"]:
        pipe = HybridPipe(batch_size=batch_size, device=device)
        pipe.build()
        for _ in range(5):
            out1, out2, out3, out4 = pipe.run()
            outs = [out.as_cpu() if device == 'gpu' else out for out in [out1, out2, out3, out4]]
            check_batch(outs[0], outs[1], batch_size)
            check_batch(outs[0], outs[2], batch_size)
            check_batch(outs[1], outs[3], batch_size)
def test_seed():
    """Pipelines built with the same fixed seed must produce identical random
    augmentations across repeated constructions and runs."""
    batch_size = 64

    class HybridPipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id):
            super(HybridPipe, self).__init__(batch_size,
                                             num_threads,
                                             device_id,
                                             seed=12)
            self.input = ops.readers.Caffe(path=caffe_db_folder, random_shuffle=True)
            self.decode = ops.decoders.Image(device="mixed", output_type=types.RGB)
            self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                                dtype=types.FLOAT,
                                                crop=(224, 224),
                                                mean=[128., 128., 128.],
                                                std=[1., 1., 1.])
            self.coin = ops.random.CoinFlip()
            self.uniform = ops.random.Uniform(range=(0.0, 1.0))
            self.iter = 0

        def define_graph(self):
            self.jpegs, self.labels = self.input()
            images = self.decode(self.jpegs)
            mirror = self.coin()
            output = self.cmnp(images, mirror=mirror, crop_pos_x=self.uniform(),
                               crop_pos_y=self.uniform())
            return (output, self.labels)

    n = 30  # fixed sample index compared across pipeline instances
    for i in range(50):
        # A fresh pipeline each iteration; seed=12 must make them all agree.
        pipe = HybridPipe(batch_size=batch_size, num_threads=2, device_id=0)
        pipe.build()
        pipe_out = pipe.run()
        pipe_out_cpu = pipe_out[0].as_cpu()
        img_chw_test = pipe_out_cpu.at(n)
        if i == 0:
            img_chw = img_chw_test
        assert np.sum(np.abs(img_chw - img_chw_test)) == 0
def test_none_seed():
    """With seed=None, each freshly built pipeline must draw different random numbers."""
    batch_size = 60
    reference = None
    for _ in range(50):
        pipe = Pipeline(batch_size=batch_size, num_threads=2, device_id=0, seed=None)
        with pipe:
            rng_out = fn.random.uniform(range=(0.0, 1.0))
            pipe.set_outputs(rng_out)
        pipe.build()
        current = pipe.run()[0].as_array()
        if reference is None:
            reference = current
        else:
            # Any later pipeline must differ from the first one.
            assert np.sum(np.abs(reference - current)) != 0
def test_as_array():
    """Check TensorList.as_array(): correct (N, C, H, W) shape and reproducible
    contents across identically-seeded pipeline instances."""
    batch_size = 64

    class HybridPipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id):
            super(HybridPipe, self).__init__(batch_size,
                                             num_threads,
                                             device_id,
                                             seed=12)
            self.input = ops.readers.Caffe(path=caffe_db_folder, random_shuffle=True)
            self.decode = ops.decoders.Image(device="mixed", output_type=types.RGB)
            self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                                dtype=types.FLOAT,
                                                crop=(224, 224),
                                                mean=[128., 128., 128.],
                                                std=[1., 1., 1.])
            self.coin = ops.random.CoinFlip()
            self.uniform = ops.random.Uniform(range=(0.0, 1.0))
            self.iter = 0

        def define_graph(self):
            self.jpegs, self.labels = self.input()
            images = self.decode(self.jpegs)
            mirror = self.coin()
            output = self.cmnp(images, mirror=mirror, crop_pos_x=self.uniform(),
                               crop_pos_y=self.uniform())
            return (output, self.labels)

    for i in range(50):
        pipe = HybridPipe(batch_size=batch_size,
                          num_threads=2,
                          device_id=0)
        pipe.build()
        pipe_out = pipe.run()
        pipe_out_cpu = pipe_out[0].as_cpu()
        # The whole batch collapsed into one numpy array.
        img_chw_test = pipe_out_cpu.as_array()
        if i == 0:
            img_chw = img_chw_test
        assert img_chw_test.shape == (batch_size, 3, 224, 224)
        # seed=12 makes every rebuilt pipeline produce the same batch.
        assert np.sum(np.abs(img_chw - img_chw_test)) == 0
def test_seed_serialize():
    """Pipelines deserialized from the same serialized, seeded pipeline must
    all produce identical outputs."""
    batch_size = 64

    class HybridPipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id):
            super(HybridPipe, self).__init__(batch_size,
                                             num_threads,
                                             device_id,
                                             seed=12)
            self.input = ops.readers.Caffe(path=caffe_db_folder, random_shuffle=True)
            self.decode = ops.decoders.Image(device="mixed", output_type=types.RGB)
            self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                                dtype=types.FLOAT,
                                                crop=(224, 224),
                                                mean=[128., 128., 128.],
                                                std=[1., 1., 1.])
            self.coin = ops.random.CoinFlip()
            self.uniform = ops.random.Uniform(range=(0.0, 1.0))
            self.iter = 0

        def define_graph(self):
            self.jpegs, self.labels = self.input()
            images = self.decode(self.jpegs)
            mirror = self.coin()
            output = self.cmnp(images, mirror=mirror, crop_pos_x=self.uniform(),
                               crop_pos_y=self.uniform())
            return (output, self.labels)

    n = 30  # fixed sample index compared across deserialized pipelines
    orig_pipe = HybridPipe(batch_size=batch_size,
                           num_threads=2,
                           device_id=0)
    s = orig_pipe.serialize()
    for i in range(50):
        # Each iteration rebuilds a fresh pipeline from the same serialized form.
        pipe = Pipeline()
        pipe.deserialize_and_build(s)
        pipe_out = pipe.run()
        pipe_out_cpu = pipe_out[0].as_cpu()
        img_chw_test = pipe_out_cpu.at(n)
        if i == 0:
            img_chw = img_chw_test
        assert np.sum(np.abs(img_chw - img_chw_test)) == 0
def test_make_contiguous_serialize():
    """A COCO pipeline ending in a GPU op can be serialized, discarded and
    rebuilt from its serialized form."""
    batch_size = 32

    class COCOPipeline(Pipeline):
        def __init__(self, batch_size, num_threads, device_id):
            super(COCOPipeline, self).__init__(batch_size, num_threads, device_id)
            self.input = ops.readers.COCO(
                file_root=coco_image_folder, annotations_file=coco_annotation_file,
                ratio=True, ltrb=True)
            self.decode = ops.decoders.Image(device="mixed")
            self.crop = ops.RandomBBoxCrop(device="cpu", seed=12)
            self.slice = ops.Slice(device="gpu")

        def define_graph(self):
            encoded, bboxes, labels = self.input()
            decoded = self.decode(encoded)
            crop_begin, crop_size, bboxes, labels = self.crop(bboxes, labels)
            return self.slice(decoded, crop_begin, crop_size)

    pipe = COCOPipeline(batch_size=batch_size, num_threads=2, device_id=0)
    serialized = pipe.serialize()
    del pipe
    restored = Pipeline(batch_size=batch_size, num_threads=2, device_id=0)
    restored.deserialize_and_build(serialized)
def test_make_contiguous_serialize_and_use():
    """A deserialized copy of a COCO pipeline must produce the same outputs
    as the original it was serialized from."""
    batch_size = 2

    class COCOPipeline(Pipeline):
        def __init__(self, batch_size, num_threads, device_id):
            super(COCOPipeline, self).__init__(batch_size, num_threads, device_id)
            self.input = ops.readers.COCO(
                file_root=coco_image_folder, annotations_file=coco_annotation_file,
                ratio=True, ltrb=True)
            self.decode = ops.decoders.Image(device="mixed")
            self.crop = ops.RandomBBoxCrop(device="cpu", seed=25)
            self.slice = ops.Slice(device="gpu")

        def define_graph(self):
            encoded, bboxes, labels = self.input()
            decoded = self.decode(encoded)
            crop_begin, crop_size, bboxes, labels = self.crop(bboxes, labels)
            return self.slice(decoded, crop_begin, crop_size)

    original = COCOPipeline(batch_size=batch_size, num_threads=2, device_id=0)
    serialized = original.serialize()
    clone = Pipeline(batch_size=batch_size, num_threads=2, device_id=0)
    clone.deserialize_and_build(serialized)
    compare_pipelines(original, clone, batch_size, 50)
def test_warpaffine():
    """Compare DALI's WarpAffine against OpenCV's cv2.warpAffine with the same
    affine matrix; the per-pixel difference must stay below 2.5%."""

    class HybridPipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id):
            super(HybridPipe, self).__init__(batch_size, num_threads, device_id, seed=12)
            self.input = ops.readers.Caffe(path=caffe_db_folder, random_shuffle=True)
            self.decode = ops.decoders.Image(device="mixed", output_type=types.RGB)
            self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                                dtype=types.FLOAT,
                                                output_layout=types.NHWC,
                                                crop=(224, 224),
                                                mean=[128., 128., 128.],
                                                std=[1., 1., 1.])
            self.affine = ops.WarpAffine(device="gpu",
                                         matrix=[1.0, 0.8, -0.8 * 112, 0.0, 1.2, -0.2 * 112],
                                         fill_value=128,
                                         interp_type=types.INTERP_LINEAR)
            self.iter = 0

        def define_graph(self):
            self.jpegs, self.labels = self.input()
            images = self.decode(self.jpegs)
            # Two copies via MIS: one kept as reference, one warped.
            outputs = self.cmnp([images, images])
            outputs[1] = self.affine(outputs[1])
            return [self.labels] + outputs

    pipe = HybridPipe(batch_size=128, num_threads=2, device_id=0)
    pipe.build()
    pipe_out = pipe.run()
    import cv2
    orig_cpu = pipe_out[1].as_cpu()
    for i in range(128):
        orig = orig_cpu.at(i)
        # apply 0.5 correction for opencv's not-so-good notion of pixel centers
        M = np.array([1.0, 0.8, -0.8 * (112 - 0.5), 0.0, 1.2, -0.2 * (112 - 0.5)]).reshape((2, 3))
        out = cv2.warpAffine(
            orig, M, (224, 224), borderMode=cv2.BORDER_CONSTANT, borderValue=(128, 128, 128),
            flags=(cv2.WARP_INVERSE_MAP + cv2.INTER_LINEAR))
        dali_output = pipe_out[2].as_cpu().at(i)
        # Normalized max absolute difference between OpenCV and DALI warps.
        maxdif = np.max(cv2.absdiff(out, dali_output) / 255.0)
        assert maxdif < 0.025
def test_type_conversion():
    """CropMirrorNormalize mean/std given as floats, ints (mixed) or a single
    scalar must all be converted identically and produce equal outputs."""

    class HybridPipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id):
            super(HybridPipe, self).__init__(batch_size, num_threads, device_id, seed=12)
            self.input = ops.readers.Caffe(path=caffe_db_folder, random_shuffle=True)
            self.decode = ops.decoders.Image(device="mixed", output_type=types.RGB)
            self.cmnp_all = ops.CropMirrorNormalize(
                device="gpu",
                dtype=types.FLOAT,
                output_layout=types.NHWC,
                crop=(224, 224),
                mean=[128., 128., 128.],
                std=[1., 1., 1.])
            self.cmnp_int = ops.CropMirrorNormalize(
                device="gpu",
                dtype=types.FLOAT,
                output_layout=types.NHWC,
                crop=(224, 224),
                mean=[128, 128, 128],
                std=[1., 1, 1])  # Left 1 of the args as float to test whether mixing types works
            self.cmnp_1arg = ops.CropMirrorNormalize(
                device="gpu",
                dtype=types.FLOAT,
                output_layout=types.NHWC,
                crop=(224, 224),
                mean=128,
                std=1)
            self.uniform = ops.random.Uniform(range=(0, 1))

        def define_graph(self):
            self.jpegs, self.labels = self.input()
            images = self.decode(self.jpegs)
            outputs = [None for i in range(3)]
            # Share one crop position so all three variants crop identically.
            crop_pos_x = self.uniform()
            crop_pos_y = self.uniform()
            outputs[0] = self.cmnp_all(
                images, crop_pos_x=crop_pos_x, crop_pos_y=crop_pos_y)
            outputs[1] = self.cmnp_int(
                images, crop_pos_x=crop_pos_x, crop_pos_y=crop_pos_y)
            outputs[2] = self.cmnp_1arg(
                images, crop_pos_x=crop_pos_x, crop_pos_y=crop_pos_y)
            return [self.labels] + outputs

    pipe = HybridPipe(batch_size=128, num_threads=2, device_id=0)
    pipe.build()
    for i in range(10):
        pipe_out = pipe.run()
        orig_cpu = pipe_out[1].as_cpu().as_tensor()
        int_cpu = pipe_out[2].as_cpu().as_tensor()
        arg1_cpu = pipe_out[3].as_cpu().as_tensor()
        # All argument spellings must yield bit-identical results.
        assert_array_equal(orig_cpu, int_cpu)
        assert_array_equal(orig_cpu, arg1_cpu)
class ExternalInputIterator(object):
    """Endless source of constant (crop position, crop size) batches, intended
    to be fed into an ExternalSource operator."""

    def __init__(self, batch_size):
        self.batch_size = batch_size

    def __iter__(self):
        self.i = 0
        self.n = self.batch_size
        return self

    def __next__(self):
        # Every sample gets the same fixed anchor and shape.
        pos = [np.asarray([0.4, 0.2], dtype=np.float32) for _ in range(self.batch_size)]
        size = [np.asarray([0.3, 0.5], dtype=np.float32) for _ in range(self.batch_size)]
        for _ in range(self.batch_size):
            self.i = (self.i + 1) % self.n
        return (pos, size)

    next = __next__
class LazyPipeline(Pipeline):
    """Caffe-reader pipeline parameterized by the reader's `lazy_init` flag,
    used to compare eager vs deferred reader initialization."""

    def __init__(self, batch_size, db_folder, lazy_type, num_threads=1, device_id=0, num_gpus=1):
        super(LazyPipeline, self).__init__(batch_size,
                                           num_threads,
                                           device_id)
        # lazy_type controls whether the reader touches db_folder at build time.
        self.input = ops.readers.Caffe(
            path=db_folder, shard_id=device_id, num_shards=num_gpus, lazy_init=lazy_type)
        self.decode = ops.decoders.Image(device="mixed", output_type=types.RGB)
        # Fixed seeds keep crop positions reproducible across instances.
        self.pos_rng_x = ops.random.Uniform(range=(0.0, 1.0), seed=1234)
        self.pos_rng_y = ops.random.Uniform(range=(0.0, 1.0), seed=5678)
        self.crop = ops.Crop(device="gpu", crop=(224, 224))

    def define_graph(self):
        self.jpegs, self.labels = self.input()
        pos_x = self.pos_rng_x()
        pos_y = self.pos_rng_y()
        images = self.decode(self.jpegs)
        crop = self.crop(images, crop_pos_x=pos_x, crop_pos_y=pos_y)
        return (crop, self.labels)
def test_lazy_init_empty_data_path():
    """With lazy_init=False a reader over an empty path fails at build();
    with lazy_init=True the build must succeed."""
    empty_db_folder = "/data/empty"
    batch_size = 128

    eager_pipe = LazyPipeline(batch_size, empty_db_folder, lazy_type=False)
    with assert_raises(RuntimeError):
        eager_pipe.build()

    deferred_pipe = LazyPipeline(batch_size, empty_db_folder, lazy_type=True)
    deferred_pipe.build()
def test_lazy_init():
    """Lazy and non-lazy pipelines reading the same real dataset must produce
    identical outputs (lazy_init changes only *when* the reader is initialized,
    not the data it yields).
    """
    batch_size = 128
    compare_pipelines(LazyPipeline(batch_size, caffe_db_folder, lazy_type=False),
                      LazyPipeline(batch_size, caffe_db_folder, lazy_type=True),
                      batch_size=batch_size, N_iterations=20)
def test_iter_setup():
    """Check that iter_setup() feeds ExternalSource once per run() and that
    StopIteration from the source propagates through Pipeline.run(), both
    on the first epoch and after Pipeline.reset().
    """
    class TestIterator():
        # Yields `n` single-sample batches, then raises StopIteration and rewinds.
        def __init__(self, n):
            self.n = n
        def __iter__(self):
            self.i = 0
            return self
        def __next__(self):
            batch = []
            if self.i < self.n:
                # np.float was removed in NumPy 1.24; it aliased the builtin
                # float (i.e. float64), so np.float64 preserves the dtype.
                batch.append(np.arange(0, 1, dtype=np.float64))
                self.i += 1
                return batch
            else:
                self.i = 0
                raise StopIteration
        next = __next__
    class IterSetupPipeline(Pipeline):
        # Feeds one batch from `iterator` into the ExternalSource per iteration.
        def __init__(self, iterator, num_threads, device_id):
            super(IterSetupPipeline, self).__init__(1, num_threads, device_id)
            self.input = ops.ExternalSource()
            self.iterator = iterator
        def define_graph(self):
            self.batch = self.input()
            return self.batch
        def iter_setup(self):
            batch = next(self.iterator)
            self.feed_input(self.batch, batch)
    iter_num = 5
    # Sanity check: the bare iterator yields exactly iter_num batches.
    iterator = iter(TestIterator(iter_num))
    i = 0
    while True:
        try:
            next(iterator)
            i += 1
        except StopIteration:
            break
    assert iter_num == i
    # The pipeline must stop after the same number of iterations.
    iterator = iter(TestIterator(iter_num))
    pipe = IterSetupPipeline(iterator, 3, 0)
    pipe.build()
    i = 0
    while True:
        try:
            pipe.run()
            i += 1
        except StopIteration:
            break
    assert iter_num == i
    # After reset() the epoch must replay with the same length.
    pipe.reset()
    i = 0
    while True:
        try:
            pipe.run()
            i += 1
        except StopIteration:
            break
    assert iter_num == i
def test_pipeline_default_cuda_stream_priority():
    """Pipelines on CUDA streams of different priorities must give identical results."""
    batch_size = 16
    n_iters = 12
    class HybridPipe(Pipeline):
        # Serial, non-pipelined execution so the only varying factor is stream priority.
        def __init__(self, batch_size, default_cuda_stream_priority=0):
            super(HybridPipe, self).__init__(
                batch_size,
                num_threads=1,
                device_id=0, prefetch_queue_depth=1,
                exec_async=False, exec_pipelined=False,
                default_cuda_stream_priority=default_cuda_stream_priority)
            self.input = ops.readers.Caffe(path=caffe_db_folder)
            self.decode = ops.decoders.Image(device="mixed", output_type=types.RGB)
        def define_graph(self):
            inputs, labels = self.input(name="Reader")
            images = self.decode(inputs)
            return images
    # In CUDA, numerically lower values mean higher stream priority.
    HIGH_PRIORITY = -1
    LOW_PRIORITY = 0
    pipe1 = HybridPipe(batch_size=batch_size, default_cuda_stream_priority=HIGH_PRIORITY)
    pipe2 = HybridPipe(batch_size=batch_size, default_cuda_stream_priority=LOW_PRIORITY)
    pipe1.build()
    pipe2.build()
    for _ in range(n_iters):
        out1 = pipe1.run()
        out2 = pipe2.run()
        for i in range(batch_size):
            out1_data = out1[0].as_cpu()
            out2_data = out2[0].as_cpu()
            # Decoded images must match exactly, sample by sample.
            assert np.sum(np.abs(out1_data.at(i) - out2_data.at(i))) == 0
class CachedPipeline(Pipeline):
    """Reader + mixed image decoder pipeline used for decoder-cache tests.

    ``reader_type`` selects one of the supported readers. Every reader is
    configured with ``stick_to_shard=True`` so the same shard (and hence the
    same images) repeats every epoch, which makes the decoder's image cache
    effective. ``is_cached`` enables the nvJPEG image cache on the decoder.
    """
    def __init__(self, reader_type, batch_size, is_cached=False, is_cached_batch_copy=True,
                 seed=123456, skip_cached_images=False, num_shards=30):
        super(CachedPipeline, self).__init__(batch_size, num_threads=1, device_id=0,
                                             prefetch_queue_depth=1, seed=seed)
        self.reader_type = reader_type
        if reader_type == "readers.MXNet":
            self.input = ops.readers.MXNet(path=os.path.join(recordio_db_folder, "train.rec"),
                                           index_path=os.path.join(recordio_db_folder, "train.idx"),
                                           shard_id=0,
                                           num_shards=num_shards,
                                           stick_to_shard=True,
                                           skip_cached_images=skip_cached_images,
                                           prefetch_queue_depth=1)
        elif reader_type == "readers.Caffe":
            self.input = ops.readers.Caffe(path=caffe_db_folder,
                                           shard_id=0,
                                           num_shards=num_shards,
                                           stick_to_shard=True,
                                           skip_cached_images=skip_cached_images,
                                           prefetch_queue_depth=1)
        elif reader_type == "readers.Caffe2":
            self.input = ops.readers.Caffe2(path=c2lmdb_db_folder,
                                            shard_id=0,
                                            num_shards=num_shards,
                                            stick_to_shard=True,
                                            skip_cached_images=skip_cached_images,
                                            prefetch_queue_depth=1)
        elif reader_type == "readers.File":
            self.input = ops.readers.File(file_root=jpeg_folder,
                                          shard_id=0,
                                          num_shards=num_shards,
                                          stick_to_shard=True,
                                          skip_cached_images=skip_cached_images,
                                          prefetch_queue_depth=1)
        elif reader_type == "readers.TFRecord":
            # Record files are everything that does not end in "idx".
            tfrecord = sorted(glob.glob(os.path.join(tfrecord_db_folder, '*[!i][!d][!x]')))
            tfrecord_idx = sorted(glob.glob(os.path.join(tfrecord_db_folder, '*idx')))
            self.input = ops.readers.TFRecord(
                path=tfrecord,
                index_path=tfrecord_idx,
                shard_id=0,
                num_shards=num_shards,
                stick_to_shard=True,
                skip_cached_images=skip_cached_images,
                features={
                    "image/encoded": tfrec.FixedLenFeature((), tfrec.string, ""),
                    "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1)
                })
        elif reader_type == "readers.Webdataset":
            wds = [os.path.join(webdataset_db_folder, archive)
                   for archive in ['devel-1.tar', 'devel-2.tar', 'devel-0.tar']]
            # Keep the index-file handles alive for the lifetime of the pipeline.
            self.wds_index_files = [generate_temp_wds_index(archive) for archive in wds]
            self.input = ops.readers.Webdataset(
                paths=wds, index_paths=[idx.name for idx in self.wds_index_files],
                ext=["jpg", "cls"], shard_id=0, num_shards=num_shards, stick_to_shard=True,
                skip_cached_images=skip_cached_images)
        if is_cached:
            self.decode = ops.decoders.Image(
                device="mixed",
                output_type=types.RGB,
                cache_size=2000,
                cache_threshold=0,
                cache_type='threshold',
                cache_debug=False,
                hw_decoder_load=0.0,  # 0.0 for deterministic results
                cache_batch_copy=is_cached_batch_copy)
        else:
            # hw_decoder_load=0.0 for deterministic results
            self.decode = ops.decoders.Image(
                device="mixed", output_type=types.RGB, hw_decoder_load=0.0)
    def define_graph(self):
        # The TFRecord reader returns a feature dictionary instead of a tuple.
        if self.reader_type == "readers.TFRecord":
            inputs = self.input()
            jpegs = inputs["image/encoded"]
            labels = inputs["image/class/label"]
        else:
            jpegs, labels = self.input()
        images = self.decode(jpegs)
        return (images, labels)
def test_nvjpeg_cached_batch_copy_pipelines():
    """Decoder cache with batch copy enabled vs. disabled must produce identical outputs."""
    batch_size = 26
    reader_types = ("readers.MXNet", "readers.Caffe", "readers.Caffe2",
                    "readers.File", "readers.TFRecord", "readers.Webdataset")
    for reader in reader_types:
        with_copy = CachedPipeline(reader, batch_size, is_cached=True,
                                   is_cached_batch_copy=True)
        without_copy = CachedPipeline(reader, batch_size, is_cached=True,
                                      is_cached_batch_copy=False)
        compare_pipelines(with_copy, without_copy, batch_size=batch_size, N_iterations=20)
def test_nvjpeg_cached_pipelines():
    """Cached and non-cached decoder pipelines must produce identical outputs."""
    batch_size = 26
    for reader in ("readers.MXNet", "readers.Caffe", "readers.Caffe2",
                   "readers.File", "readers.TFRecord", "readers.Webdataset"):
        uncached = CachedPipeline(reader, batch_size, is_cached=False)
        cached = CachedPipeline(reader, batch_size, is_cached=True)
        compare_pipelines(uncached, cached, batch_size=batch_size, N_iterations=20)
def test_skip_cached_images():
    """skip_cached_images must not change decoding results vs. a non-cached pipeline."""
    batch_size = 1
    reader_types = ("readers.MXNet", "readers.Caffe", "readers.Caffe2",
                    "readers.File", "readers.Webdataset")
    for reader in reader_types:
        reference = CachedPipeline(reader, batch_size, is_cached=False)
        skipping = CachedPipeline(reader, batch_size, is_cached=True,
                                  skip_cached_images=True)
        compare_pipelines(reference, skipping, batch_size=batch_size, N_iterations=100)
def test_caffe_no_label():
    """readers.Caffe must work both with and without labels (label_available flag)."""
    class CaffePipeline(Pipeline):
        # `labels` toggles the reader's label_available option and how many
        # outputs the reader call is unpacked into.
        def __init__(self, batch_size, path_to_data, labels, seed=123456,
                     skip_cached_images=False, num_shards=1):
            super(CaffePipeline, self).__init__(batch_size, num_threads=1, device_id=0,
                                                prefetch_queue_depth=1, seed=seed)
            self.input = ops.readers.Caffe(path=path_to_data,
                                           shard_id=0,
                                           num_shards=num_shards,
                                           stick_to_shard=True,
                                           prefetch_queue_depth=1,
                                           label_available=labels)
            self.decode = ops.decoders.Image(output_type=types.RGB)
            self.labels = labels
        def define_graph(self):
            if not self.labels:
                jpegs = self.input()
            else:
                jpegs, _ = self.input()
            images = self.decode(jpegs)
            return (images)
    # Dataset with labels: reader returns (jpegs, labels).
    pipe = CaffePipeline(2, caffe_db_folder, True)
    pipe.build()
    pipe.run()
    # Dataset without labels: reader returns only jpegs.
    pipe = CaffePipeline(2, caffe_no_label_db_folder, False)
    pipe.build()
    pipe.run()
def test_caffe2_no_label():
    """readers.Caffe2 must work both with and without labels (label_type option)."""
    class Caffe2Pipeline(Pipeline):
        # label_type == 4 makes the reader return only images (single output);
        # other values yield a (jpegs, labels) pair.
        def __init__(self, batch_size, path_to_data, label_type, seed=123456,
                     skip_cached_images=False, num_shards=1):
            super(Caffe2Pipeline, self).__init__(batch_size, num_threads=1, device_id=0,
                                                 prefetch_queue_depth=1, seed=seed)
            self.input = ops.readers.Caffe2(path=path_to_data,
                                            shard_id=0,
                                            num_shards=num_shards,
                                            stick_to_shard=True,
                                            prefetch_queue_depth=1,
                                            label_type=label_type)
            self.decode = ops.decoders.Image(output_type=types.RGB)
            self.label_type = label_type
        def define_graph(self):
            if self.label_type == 4:
                jpegs = self.input()
            else:
                jpegs, _ = self.input()
            images = self.decode(jpegs)
            return (images)
    # Dataset with labels (label_type 0).
    pipe = Caffe2Pipeline(2, c2lmdb_db_folder, 0)
    pipe.build()
    pipe.run()
    # Dataset without labels (label_type 4).
    pipe = Caffe2Pipeline(2, c2lmdb_no_label_db_folder, 4)
    pipe.build()
    pipe.run()
def test_as_tensor():
    """as_reshaped_tensor must accept any shape with the same number of elements."""
    class HybridPipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id):
            super(HybridPipe, self).__init__(batch_size, num_threads, device_id, seed=12)
            self.input = ops.readers.Caffe(path=caffe_db_folder, random_shuffle=True)
        def define_graph(self):
            _, self.labels = self.input()
            return self.labels
    batch_size = 8
    # All candidate shapes contain batch_size * 1 == 8 elements.
    shape = [[2, 2, 2], [8, 1], [1, 8], [4, 2], [2, 4], [8], [1, 2, 1, 2, 1, 2], [1, 1, 1, 8]]
    pipe = HybridPipe(batch_size=batch_size, num_threads=2, device_id=0)
    pipe.build()
    for sh in shape:
        pipe_out = pipe.run()[0]
        assert pipe_out.as_tensor().shape() == [batch_size, 1]
        assert pipe_out.as_reshaped_tensor(sh).shape() == sh
        # Reshaping twice (to a possibly different shape) must also work.
        different_shape = random.choice(shape)
        assert pipe_out.as_reshaped_tensor(different_shape).shape() == different_shape
def test_as_tensor_fail():
    """as_reshaped_tensor must raise when the requested shape has a wrong element count."""
    class HybridPipe(Pipeline):
        def __init__(self, batch_size, num_threads, device_id):
            super(HybridPipe, self).__init__(batch_size, num_threads, device_id, seed=12)
            self.input = ops.readers.Caffe(path=caffe_db_folder, random_shuffle=True)
        def define_graph(self):
            _, self.labels = self.input()
            return self.labels
    batch_size = 8
    # None of these shapes contain exactly batch_size * 1 == 8 elements.
    shape = [[2, 2, 2, 3], [8, 1, 6], [1, 8, 4], [4, 2, 9], [2, 4, 0], [8, 2],
             [1, 2, 1, 2, 1, 2, 3], [7, 1, 1, 1, 8]]
    pipe = HybridPipe(batch_size=batch_size, num_threads=2, device_id=0)
    pipe.build()
    for sh in shape:
        pipe_out = pipe.run()[0]
        assert pipe_out.as_tensor().shape() == [batch_size, 1]
        with assert_raises(RuntimeError):
            pipe_out.as_reshaped_tensor(sh).shape()
def test_python_formats():
    """Check that arrays of all supported NumPy scalar types fed through
    ExternalSource come back with an identical dtype (same item size and
    type string).
    """
    class TestPipeline(Pipeline):
        def __init__(self, batch_size, num_threads, device_id, num_gpus, test_array):
            super(TestPipeline, self).__init__(batch_size, num_threads, device_id)
            self.input_data = ops.ExternalSource()
            self.test_array = test_array
        def define_graph(self):
            self.data = self.input_data()
            return (self.data)
        def iter_setup(self):
            self.feed_input(self.data, self.test_array)
    # np.float_ (alias of np.float64) was removed in NumPy 2.0 and np.long
    # (alias of the builtin int) was removed in NumPy 1.24 — use np.float64
    # instead and drop np.long; 64-bit integers are already covered by np.int64.
    for t in [
            np.bool_, np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64, np.uint8,
            np.uint16, np.uint32, np.uint64, np.float64, np.float32, np.float16, np.short,
            np.longlong, np.ushort, np.ulonglong
    ]:
        test_array = np.array([[1, 1], [1, 1]], dtype=t)
        pipe = TestPipeline(2, 1, 0, 1, test_array)
        pipe.build()
        out = pipe.run()[0]
        out_dtype = out.at(0).dtype
        assert test_array.dtype.itemsize == out_dtype.itemsize
        assert test_array.dtype.str == out_dtype.str
def test_api_check1():
    """After using the basic run() API, scheduled-API calls must raise unless
    the API-type check is explicitly disabled."""
    batch_size = 1
    class TestPipeline(Pipeline):
        def __init__(self, batch_size, num_threads, device_id, num_gpus):
            super(TestPipeline, self).__init__(batch_size, num_threads, device_id)
            self.input = ops.readers.Caffe(
                path=caffe_db_folder, shard_id=device_id, num_shards=num_gpus)
        def define_graph(self):
            inputs, labels = self.input(name="Reader")
            return (inputs)
    pipe = TestPipeline(batch_size=batch_size, num_threads=1, device_id=0, num_gpus=1)
    pipe.build()
    # Locks the pipeline into the BASIC API type.
    pipe.run()
    for method in [pipe.schedule_run, pipe.share_outputs, pipe.release_outputs, pipe.outputs]:
        with assert_raises(
                RuntimeError,
                glob=("Mixing pipeline API type. Currently used: PipelineAPIType.BASIC,"
                      " but trying to use PipelineAPIType.SCHEDULED")):
            method()
    # disable check
    pipe.enable_api_check(False)
    for method in [pipe.schedule_run, pipe.share_outputs, pipe.release_outputs, pipe.outputs]:
        method()
def test_api_check2():
    """After using the scheduled API, basic run() must raise unless
    the API-type check is explicitly disabled."""
    batch_size = 1
    class TestPipeline(Pipeline):
        def __init__(self, batch_size, num_threads, device_id, num_gpus):
            super(TestPipeline, self).__init__(batch_size, num_threads, device_id)
            self.input = ops.readers.Caffe(
                path=caffe_db_folder, shard_id=device_id, num_shards=num_gpus)
        def define_graph(self):
            inputs, labels = self.input(name="Reader")
            return (inputs)
    pipe = TestPipeline(batch_size=batch_size, num_threads=1, device_id=0, num_gpus=1)
    pipe.build()
    # Locks the pipeline into the SCHEDULED API type.
    pipe.schedule_run()
    pipe.share_outputs()
    pipe.release_outputs()
    pipe.schedule_run()
    pipe.outputs()
    with assert_raises(
            RuntimeError,
            glob=("Mixing pipeline API type. Currently used: PipelineAPIType.SCHEDULED,"
                  " but trying to use PipelineAPIType.BASIC")):
        pipe.run()
    # disable check
    pipe.enable_api_check(False)
    pipe.run()
class DupPipeline(Pipeline):
    """Pipeline returning the same output data node multiple times.

    Returns four outputs: the processed images three times (two of them via
    the same Python object) plus the raw decoded images. ``first_out_device``
    selects the decoder mode ("cpu" or "mixed"); ``second_out_device`` selects
    the CropMirrorNormalize device, or disables it entirely when falsy.
    """
    def __init__(self, batch_size, num_threads, device_id, first_out_device="cpu",
                 second_out_device="cpu"):
        super(DupPipeline, self).__init__(batch_size, num_threads, device_id)
        self.first_out_device = first_out_device
        self.second_out_device = second_out_device
        self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id, num_shards=1)
        self.decode = ops.decoders.Image(
            device="mixed" if first_out_device == "mixed" else "cpu", output_type=types.RGB)
        if self.second_out_device:
            self.cmnp = ops.CropMirrorNormalize(device=second_out_device,
                                                dtype=types.FLOAT,
                                                output_layout=types.NHWC,
                                                crop=(224, 224),
                                                mean=[128., 128., 128.],
                                                std=[1., 1., 1.])
    def define_graph(self):
        inputs, _ = self.input()
        decoded_images = self.decode(inputs)
        if self.second_out_device:
            # CPU-decoded data must be moved to the GPU before a GPU operator.
            if self.first_out_device != "mixed" and self.second_out_device == "gpu":
                images = self.cmnp(decoded_images.gpu())
            else:
                images = self.cmnp(decoded_images)
        else:
            images = decoded_images
        images_2 = images
        return images, images_2, images, decoded_images
def check_duplicated_outs_pipeline(first_device, second_device):
    """Run DupPipeline and verify the three duplicated outputs hold equal data.

    second_device=None exposes the decoded images directly (no CropMirrorNormalize).
    """
    batch_size = 5
    pipe = DupPipeline(batch_size=batch_size, num_threads=2, device_id=0,
                       first_out_device=first_device, second_out_device=second_device)
    pipe.build()
    out = pipe.run()
    assert len(out) == 4
    for i in range(batch_size):
        # Raw decoded images live on the GPU iff the decoder ran in "mixed" mode.
        assert isinstance(out[3][0], dali.backend_impl.TensorGPU) or first_device == "cpu"
        out1 = as_array(out[0][i])
        out2 = as_array(out[1][i])
        out3 = as_array(out[2][i])
        np.testing.assert_array_equal(out1, out2)
        np.testing.assert_array_equal(out1, out3)
def test_duplicated_outs_pipeline():
    """Yield duplicated-output checks for every supported device pairing."""
    device_pairs = [
        ("cpu", None),
        ("cpu", "cpu"),
        ("cpu", "gpu"),
        ("mixed", None),
        ("mixed", "gpu"),
    ]
    for pair in device_pairs:
        yield (check_duplicated_outs_pipeline,) + pair
def check_serialized_outs_duplicated_pipeline(first_device, second_device):
    """Like check_duplicated_outs_pipeline, but through a serialize/deserialize
    round trip, so duplicated outputs must survive protobuf serialization."""
    batch_size = 5
    pipe = DupPipeline(batch_size=batch_size, num_threads=2, device_id=0,
                       first_out_device=first_device, second_out_device=second_device)
    serialized_pipeline = pipe.serialize()
    # Drop the original so only the deserialized pipeline is exercised.
    del pipe
    new_pipe = Pipeline(batch_size=batch_size, num_threads=2, device_id=0)
    new_pipe.deserialize_and_build(serialized_pipeline)
    out = new_pipe.run()
    assert len(out) == 4
    for i in range(batch_size):
        # Raw decoded images live on the GPU iff the decoder ran in "mixed" mode.
        assert isinstance(out[3][0], dali.backend_impl.TensorGPU) or first_device == "cpu"
        out1 = as_array(out[0][i])
        out2 = as_array(out[1][i])
        out3 = as_array(out[2][i])
        np.testing.assert_array_equal(out1, out2)
        np.testing.assert_array_equal(out1, out3)
def test_serialized_outs_duplicated_pipeline():
    """Yield serialized duplicated-output checks for every supported device pairing."""
    device_pairs = [
        ("cpu", None),
        ("cpu", "cpu"),
        ("cpu", "gpu"),
        ("mixed", None),
        ("mixed", "gpu"),
    ]
    for pair in device_pairs:
        yield (check_serialized_outs_duplicated_pipeline,) + pair
def check_duplicated_outs_cpu_to_gpu(device):
    """Check that a Slice pipeline can return its sliced data alongside the
    CPU argument inputs (crop position/size) as separate pipeline outputs.

    The argument inputs must stay CPU tensors even when the sliced data is
    produced on the GPU.
    """
    class SliceArgsIterator(object):
        # Generates per-sample slice anchors and shapes, in either normalized
        # or absolute coordinates, along the axes selected by `axes` or
        # `axis_names`.
        def __init__(
                self,
                batch_size,
                num_dims=3,
                image_shape=None,  # Needed if normalized_anchor and normalized_shape are False
                image_layout=None,  # Needed if axis_names is used to specify the slice
                normalized_anchor=True,
                normalized_shape=True,
                axes=None,
                axis_names=None,
                min_norm_anchor=0.0,
                max_norm_anchor=0.2,
                min_norm_shape=0.4,
                max_norm_shape=0.75,
                seed=54643613):
            self.batch_size = batch_size
            self.num_dims = num_dims
            self.image_shape = image_shape
            self.image_layout = image_layout
            self.normalized_anchor = normalized_anchor
            self.normalized_shape = normalized_shape
            self.axes = axes
            self.axis_names = axis_names
            self.min_norm_anchor = min_norm_anchor
            self.max_norm_anchor = max_norm_anchor
            self.min_norm_shape = min_norm_shape
            self.max_norm_shape = max_norm_shape
            self.seed = seed
            # Default to slicing width and height when nothing was specified.
            if not self.axis_names and not self.axes:
                self.axis_names = "WH"
            # Translate axis names into indices within the image layout.
            if self.axis_names:
                self.axes = []
                for axis_name in self.axis_names:
                    assert axis_name in self.image_layout
                    self.axes.append(self.image_layout.index(axis_name))
            assert len(self.axes) > 0
        def __iter__(self):
            self.i = 0
            self.n = self.batch_size
            return self
        def __next__(self):
            pos = []
            size = []
            anchor_amplitude = self.max_norm_anchor - self.min_norm_anchor
            anchor_offset = self.min_norm_anchor
            shape_amplitude = self.max_norm_shape - self.min_norm_shape
            shape_offset = self.min_norm_shape
            # Reseed on every call so the generated arguments are identical
            # across pipeline instances and iterations.
            np.random.seed(self.seed)
            for k in range(self.batch_size):
                norm_anchor = anchor_amplitude * np.random.rand(len(self.axes)) + anchor_offset
                norm_shape = shape_amplitude * np.random.rand(len(self.axes)) + shape_offset
                if self.normalized_anchor:
                    anchor = norm_anchor
                else:
                    anchor = [
                        floor(norm_anchor[i] * self.image_shape[self.axes[i]])
                        for i in range(len(self.axes))]
                if self.normalized_shape:
                    shape = norm_shape
                else:
                    shape = [
                        floor(norm_shape[i] * self.image_shape[self.axes[i]])
                        for i in range(len(self.axes))]
                pos.append(np.asarray(anchor, dtype=np.float32))
                size.append(np.asarray(shape, dtype=np.float32))
            self.i = (self.i + 1) % self.n
            return (pos, size)
        next = __next__
    class SliceSynthDataPipeline(Pipeline):
        # Slices externally-fed synthetic data; the crop position and size are
        # fed as CPU argument inputs and also returned as pipeline outputs.
        def __init__(self, device, batch_size, layout, iterator, pos_size_iter,
                     num_threads=1, device_id=0, num_gpus=1,
                     axes=None, axis_names=None, normalized_anchor=True, normalized_shape=True):
            super(SliceSynthDataPipeline, self).__init__(
                batch_size, num_threads, device_id, seed=1234)
            self.device = device
            self.layout = layout
            self.iterator = iterator
            self.pos_size_iter = pos_size_iter
            self.inputs = ops.ExternalSource()
            self.input_crop_pos = ops.ExternalSource()
            self.input_crop_size = ops.ExternalSource()
            if axis_names:
                self.slice = ops.Slice(
                    device=self.device,
                    normalized_anchor=normalized_anchor,
                    normalized_shape=normalized_shape,
                    axis_names=axis_names)
            elif axes:
                self.slice = ops.Slice(
                    device=self.device,
                    normalized_anchor=normalized_anchor,
                    normalized_shape=normalized_shape,
                    axes=axes)
            else:
                self.slice = ops.Slice(
                    device=self.device,
                    normalized_anchor=normalized_anchor,
                    normalized_shape=normalized_shape,)
        def define_graph(self):
            self.data = self.inputs()
            self.crop_pos = self.input_crop_pos()
            self.crop_size = self.input_crop_size()
            data = self.data.gpu() if self.device == 'gpu' else self.data
            out = self.slice(data, self.crop_pos, self.crop_size)
            return out, self.crop_pos, self.crop_size
        def iter_setup(self):
            data = self.iterator.next()
            self.feed_input(self.data, data, layout=self.layout)
            (crop_pos, crop_size) = self.pos_size_iter.next()
            self.feed_input(self.crop_pos, crop_pos)
            self.feed_input(self.crop_size, crop_size)
    batch_size = 1
    input_shape = (200, 400, 3)
    layout = "HWC"
    axes = None
    axis_names = "WH"
    normalized_anchor = False
    normalized_shape = False
    eiis = [RandomDataIterator(batch_size, shape=input_shape) for k in range(2)]
    eii_args = [
        SliceArgsIterator(batch_size, len(input_shape), image_shape=input_shape,
                          image_layout=layout, axes=axes, axis_names=axis_names,
                          normalized_anchor=normalized_anchor, normalized_shape=normalized_shape)
        for k in range(2)]
    pipe = SliceSynthDataPipeline(device, batch_size, layout, iter(eiis[0]), iter(eii_args[0]),
                                  axes=axes, axis_names=axis_names,
                                  normalized_anchor=normalized_anchor,
                                  normalized_shape=normalized_shape)
    pipe.build()
    out = pipe.run()
    # Sliced data follows the requested device; argument inputs stay on the CPU.
    assert isinstance(out[0][0], dali.backend_impl.TensorGPU) or device == "cpu"
    assert not isinstance(out[1][0], dali.backend_impl.TensorGPU)
    assert not isinstance(out[2][0], dali.backend_impl.TensorGPU)
def test_duplicated_outs_cpu_op_to_gpu():
    """Outputs of a CPU op feeding a GPU op can also be exposed directly.

    Checks that no MakeContiguous is needed between the CPU producer and the
    pipeline's CPU outputs when the same data also feeds a GPU operator.
    """
    for dev in ("cpu", "gpu"):
        yield check_duplicated_outs_cpu_to_gpu, dev
def test_ref_count():
    """Constructing and building a pipeline must not leak references to it."""
    class HybridPipe(Pipeline):
        def __init__(self):
            super(HybridPipe, self).__init__(1, 1, 0, seed=12)
            self.input = ops.readers.Caffe(path=caffe_db_folder, random_shuffle=True)
        def define_graph(self):
            _, self.labels = self.input()
            return self.labels
    pipe = HybridPipe()
    # 2 == the local variable plus the temporary getrefcount() argument.
    assert sys.getrefcount(pipe) == 2
    pipe.build()
    assert sys.getrefcount(pipe) == 2
def test_executor_meta():
    """Check executor_statistics(): per-operator memory stats must be present,
    self-consistent, and match the expected output sizes."""
    class TestPipeline(Pipeline):
        def __init__(self, batch_size, num_threads, device_id, num_gpus, seed):
            super(TestPipeline, self).__init__(
                batch_size, num_threads, device_id, enable_memory_stats=True)
            self.input = ops.readers.Caffe(
                path=caffe_db_folder, shard_id=device_id, num_shards=num_gpus, seed=seed)
            self.decode = ops.decoders.ImageRandomCrop(
                device="mixed", output_type=types.RGB, seed=seed)
            self.res = ops.Resize(device="gpu", resize_x=224, resize_y=224)
            # NOTE: uses the deprecated `output_dtype` alias of `dtype`.
            self.cmnp = ops.CropMirrorNormalize(
                device="gpu", output_dtype=types.FLOAT,
                crop=(224, 224), mean=[128., 128., 128.], std=[1., 1., 1.])
            self.coin = ops.random.CoinFlip(seed=seed)
        def define_graph(self):
            self.jpegs, self.labels = self.input()
            images = self.decode(self.jpegs)
            resized_images = self.res(images)
            mirror = self.coin()
            output = self.cmnp(resized_images, mirror=mirror)
            return (output, resized_images, self.labels)
    random_seed = 123456
    batch_size = 10
    test_pipe = TestPipeline(
        batch_size=batch_size, num_threads=1, device_id=0, num_gpus=1, seed=random_seed)
    test_pipe.build()
    test_pipe.run()
    meta = test_pipe.executor_statistics()
    # all operators (readers.Caffe, decoders.ImageRandomCrop, Resize, CropMirrorNormalize,
    # CoinFlip) + make_contiguous * 3 (all outputs)
    assert len(meta) == 8
    for k in meta.keys():
        if "CropMirrorNormalize" in k:
            crop_meta = meta[k]
            assert crop_meta["real_memory_size"] == crop_meta["reserved_memory_size"]
            # size of crop * num_of_channels * batch_size * data_size
            assert crop_meta["real_memory_size"][0] == 224 * 224 * 3 * batch_size * 4
    for k in meta.keys():
        if "CoinFlip" in k:
            coin_meta = meta[k]
            assert coin_meta["real_memory_size"] == coin_meta["reserved_memory_size"]
            # batch_size * data_size
            assert coin_meta["real_memory_size"][0] == batch_size * 4
    for k, v in meta.items():
        assert v["real_memory_size"] <= v["reserved_memory_size"]
        def calc_avg_max(val):
            # Per-sample upper bound derived from the whole-batch total.
            return [int(ceil(v / batch_size)) for v in val]
        # for CPU the biggest tensor is usually bigger than the average,
        # for the GPU max is the average
        if "CPU" in k or "MIXED" in k:
            assert calc_avg_max(v["real_memory_size"]) <= v["max_real_memory_size"]
            assert calc_avg_max(v["reserved_memory_size"]) <= v["max_reserved_memory_size"]
        else:
            assert calc_avg_max(v["real_memory_size"]) == v["max_real_memory_size"]
            assert calc_avg_max(v["reserved_memory_size"]) == v["max_reserved_memory_size"]
def test_bytes_per_sample_hint():
    """Providing bytes_per_sample_hint must increase the reader's reserved memory."""
    import nvidia.dali.backend
    if nvidia.dali.backend.RestrictPinnedMemUsage():
        raise SkipTest
    # Make buffers shrink freely so reservations reflect the hint, not history.
    nvidia.dali.backend.SetHostBufferShrinkThreshold(0)
    def obtain_reader_meta(iters=3, **kvargs):
        # Runs a reader pipeline and returns its executor memory statistics.
        batch_size = 10
        pipe = Pipeline(batch_size, 1, 0, enable_memory_stats=True)
        with pipe:
            out = fn.readers.caffe(path=caffe_db_folder, shard_id=0, num_shards=1, **kvargs)
            out = [o.gpu() for o in out]
        pipe.set_outputs(*out)
        pipe.build()
        for _ in range(iters):
            pipe.run()
        meta = pipe.executor_statistics()
        reader_meta = None
        for k in meta.keys():
            if "CPU___Caffe" in k:
                reader_meta = meta[k]
        return reader_meta
    reader_meta = obtain_reader_meta(iters=10)
    # Hint 10% above the observed peak must force a larger reservation up front.
    new_reader_meta = obtain_reader_meta(
        iters=1,
        bytes_per_sample_hint=[int(v * 1.1) for v in reader_meta['max_reserved_memory_size']])
    assert new_reader_meta['max_reserved_memory_size'] > reader_meta['max_reserved_memory_size']
def trigger_output_dtype_deprecated_warning():
    """Run a pipeline using the deprecated ``output_dtype`` argument of
    crop_mirror_normalize, to let callers capture the DeprecationWarning."""
    batch_size = 10
    shape = (120, 60, 3)
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    data = RandomDataIterator(batch_size, shape=shape, dtype=np.uint8)
    with pipe:
        input = fn.external_source(data, layout="HWC")
        cmn = fn.crop_mirror_normalize(input, device="cpu",
                                       output_dtype=types.FLOAT,
                                       output_layout="HWC",
                                       crop=(32, 32),
                                       mean=[128., 128., 128.],
                                       std=[1., 1., 1.])
        pipe.set_outputs(cmn)
    pipe.build()
    result, = pipe.run()
    # The deprecated alias must still take effect.
    assert result.as_array().dtype == np.float32
def trigger_image_type_deprecated_warning():
    """Run a pipeline using the deprecated ``image_type`` argument of
    crop_mirror_normalize, to let callers capture the DeprecationWarning."""
    batch_size = 10
    shape = (120, 60, 3)
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    data = RandomDataIterator(batch_size, shape=shape, dtype=np.uint8)
    with pipe:
        input = fn.external_source(data, layout="HWC")
        cmn = fn.crop_mirror_normalize(input, device="cpu",
                                       dtype=types.FLOAT,
                                       image_type=types.RGB,
                                       output_layout="HWC",
                                       crop=(32, 32),
                                       mean=[128., 128., 128.],
                                       std=[1., 1., 1.])
        pipe.set_outputs(cmn)
    pipe.build()
    result, = pipe.run()
    # The pipeline must still run correctly despite the ignored argument.
    assert result.as_array().dtype == np.float32
def test_output_dtype_deprecation():
    """``output_dtype`` must emit exactly one DeprecationWarning with the expected text."""
    with warnings.catch_warnings(record=True) as caught:
        # Make sure the warning is not suppressed by any default filter.
        warnings.simplefilter("always")
        trigger_output_dtype_deprecated_warning()
    assert len(caught) == 1
    last = caught[-1]
    assert issubclass(last.category, DeprecationWarning)
    expected = ("The argument ``output_dtype`` is a deprecated alias for "
                "``dtype``. Use ``dtype`` instead.")
    assert str(last.message) == expected
def test_image_type_deprecation():
    """``image_type`` must emit exactly one DeprecationWarning with the expected text."""
    with warnings.catch_warnings(record=True) as caught:
        # Make sure the warning is not suppressed by any default filter.
        warnings.simplefilter("always")
        trigger_image_type_deprecated_warning()
    assert len(caught) == 1
    last = caught[-1]
    assert issubclass(last.category, DeprecationWarning)
    expected = ("The argument ``image_type`` is no longer used and will be removed "
                "in a future release.")
    assert str(last.message) == expected
@raises(TypeError, glob="unexpected*output_dtype*dtype")
def test_output_dtype_both_error():
    """Passing both the deprecated ``output_dtype`` and ``dtype`` must raise TypeError."""
    batch_size = 10
    shape = (120, 60, 3)
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    data = RandomDataIterator(batch_size, shape=shape, dtype=np.uint8)
    with pipe:
        input = fn.external_source(data, layout="HWC")
        cmn = fn.crop_mirror_normalize(input, device="cpu",
                                       output_dtype=types.FLOAT,
                                       dtype=types.FLOAT,
                                       output_layout="HWC",
                                       crop=(32, 32),
                                       mean=[128., 128., 128.],
                                       std=[1., 1., 1.])
        pipe.set_outputs(cmn)
    pipe.build()
def test_epoch_size():
    """epoch_size() must report a non-zero size per named reader, and
    reader_meta()/epoch_size() must cover all four readers in the pipeline."""
    class ReaderPipeline(Pipeline):
        def __init__(self, batch_size):
            super(ReaderPipeline, self).__init__(
                batch_size, num_threads=1, device_id=0, prefetch_queue_depth=1)
            self.input_mxnet = ops.readers.MXNet(
                path=os.path.join(recordio_db_folder, "train.rec"),
                index_path=os.path.join(recordio_db_folder, "train.idx"),
                shard_id=0,
                num_shards=1,
                prefetch_queue_depth=1)
            self.input_caffe = ops.readers.Caffe(
                path=caffe_db_folder,
                shard_id=0,
                num_shards=1,
                prefetch_queue_depth=1)
            self.input_caffe2 = ops.readers.Caffe2(
                path=c2lmdb_db_folder,
                shard_id=0,
                num_shards=1,
                prefetch_queue_depth=1)
            self.input_file = ops.readers.File(
                file_root=jpeg_folder,
                shard_id=0,
                num_shards=1,
                prefetch_queue_depth=1)
        def define_graph(self):
            # Instance names are used later to query per-reader epoch sizes.
            jpegs_mxnet, _ = self.input_mxnet(name="readers.mxnet")
            jpegs_caffe, _ = self.input_caffe(name="readers.caffe")
            jpegs_caffe2, _ = self.input_caffe2(name="readers.caffe2")
            jpegs_file, _ = self.input_file(name="readers.file")
            return jpegs_mxnet, jpegs_caffe, jpegs_caffe2, jpegs_file
    pipe = ReaderPipeline(1)
    pipe.build()
    meta = pipe.reader_meta()
    assert len(meta) == 4
    assert pipe.epoch_size("readers.mxnet") != 0
    assert pipe.epoch_size("readers.caffe") != 0
    assert pipe.epoch_size("readers.caffe2") != 0
    assert pipe.epoch_size("readers.file") != 0
    assert len(pipe.epoch_size()) == 4
def test_pipeline_out_of_scope():
    """Outputs must stay valid after the pipeline object goes out of scope."""
    def build_and_run():
        source_data = [[np.array([-0.5, 1.25])]]
        pipe = dali.Pipeline(1, 1, 0)
        with pipe:
            pipe.set_outputs(dali.fn.external_source(source=source_data))
        pipe.build()
        return pipe.run()
    result = build_and_run()[0].at(0)
    assert result[0] == -0.5
    assert result[1] == 1.25
def test_return_constants():
    """Check that constants returned from a pipeline keep exact values and dtypes.

    Covers a 2-D array, a plain Python int and one scalar per supported type.
    """
    pipe = dali.Pipeline(1, 1, None)
    # Renamed from `types` so the module-level `nvidia.dali.types` import
    # is not shadowed inside this function.
    scalar_types = [bool, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.float32]
    pipe.set_outputs(np.array([[1, 2], [3, 4]]), 10, *[t(42) for t in scalar_types])
    pipe.build()
    a, b, *other = pipe.run()
    assert np.array_equal(a.at(0), np.array([[1, 2], [3, 4]]))
    assert b.at(0) == 10
    for i, o in enumerate(other):
        assert o.at(0) == scalar_types[i](42)
        assert o.at(0).dtype == scalar_types[i]
def test_preserve_arg():
    """Operators marked preserve=True must not be pruned even when their
    outputs are not returned from the pipeline (here: the resize node)."""
    pipe = dali.Pipeline(1, 1, 0)
    with pipe:
        out = dali.fn.external_source(source=[[np.array([-0.5, 1.25])]], preserve=True)
        res = dali.fn.resize(out, preserve=True)  # noqa: F841
        pipe.set_outputs(out)
    pipe.build()
def test_pipeline_wrong_device_id():
    """Building/running a pipeline with an invalid device_id must raise."""
    pipe = dali.Pipeline(batch_size=1, num_threads=1, device_id=-123)
    with pipe:
        pipe.set_outputs(np.int32([1, 2, 3]))
    with assert_raises(RuntimeError, glob="wrong device_id"):
        pipe.build()
        pipe.run()
def test_properties():
    """Check that all Pipeline properties reflect the construction arguments,
    including values forwarded through the @pipeline_def decorator."""
    @dali.pipeline_def(batch_size=11, prefetch_queue_depth={"cpu_size": 3, "gpu_size": 2})
    def my_pipe():
        pipe = Pipeline.current()
        assert pipe.max_batch_size == 11
        # `batch_size` is the legacy alias of `max_batch_size`.
        assert pipe.batch_size == 11
        assert pipe.num_threads == 3
        assert pipe.device_id == 0
        assert pipe.seed == 1234
        assert pipe.exec_pipelined is True
        assert pipe.exec_async is True
        assert pipe.set_affinity is True
        assert pipe.max_streams == -1
        assert pipe.prefetch_queue_depth == {"cpu_size": 3, "gpu_size": 2}
        assert pipe.cpu_queue_size == 3
        assert pipe.gpu_queue_size == 2
        assert pipe.py_num_workers == 3
        assert pipe.py_start_method == "fork"
        assert pipe.enable_memory_stats is False
        return np.float32([1, 2, 3])
    my_pipe(device_id=0, seed=1234, num_threads=3, set_affinity=True, py_num_workers=3)
def test_not_iterable():
    """hacks.not_iterable() must make isinstance(..., Iterable) stop matching a class."""
    import nvidia.dali._utils.hacks as hacks
    import collections.abc
    class X:
        def __iter__(self):
            pass
    class Y:
        def __iter__(self):
            pass
    # Patch one class at a time; the other must be unaffected until patched.
    for cls in (X, Y):
        assert isinstance(cls(), collections.abc.Iterable)
        hacks.not_iterable(cls)
        assert not isinstance(cls(), collections.abc.Iterable)
@pipeline_def(batch_size=1, num_threads=1, device_id=0)
def _identity_pipe():
    """Minimal pipeline passing a GPU external source straight to the output."""
    x = fn.external_source(device="gpu", name="identity_input")
    return x
@raises(TypeError, "*define_graph*callable*")
def test_invoke_serialize_error_handling_string():
    """serialize() must reject a positional string (legacy define_graph slot)."""
    _identity_pipe().serialize("any_string")
@raises(TypeError, "*define_graph*callable*")
def test_invoke_serialize_error_handling_not_string():
    """serialize() must reject a positional non-callable (legacy define_graph slot)."""
    _identity_pipe().serialize(42)
def check_dtype_ndim(dali_pipeline, output_dtype, output_ndim, n_outputs):
    """Serialize and deserialize a pipeline, verify output dtype/ndim survive
    the round trip, then build and run both pipelines so any declared
    dtype/ndim mismatch surfaces as a runtime error."""
    def ndim_dtype_matches(test_value, ref_value):
        # Scalar constraints apply to every output; broadcast to a list first.
        ref_value = ref_value if isinstance(ref_value, (list, tuple)) else [ref_value] * n_outputs
        return ref_value == test_value
    import tempfile
    with tempfile.NamedTemporaryFile() as f:
        dali_pipeline.serialize(filename=f.name)
        deserialized_pipeline = Pipeline.deserialize(filename=f.name)
    deserialized_pipeline.build()
    assert ndim_dtype_matches(deserialized_pipeline.output_ndim(), output_ndim), \
        f"`output_ndim` is not serialized properly. {deserialized_pipeline.output_ndim()} vs {output_ndim}."  # noqa: E501
    assert ndim_dtype_matches(deserialized_pipeline.output_dtype(), output_dtype), \
        f"`output_dtype` is not serialized properly. {deserialized_pipeline.output_dtype()} vs {output_dtype}."  # noqa: E501
    deserialized_pipeline.run()
    dali_pipeline.build()
    dali_pipeline.run()
@raises(RuntimeError, glob="Data type * does not match*")
def check_dtype_with_raise(dali_pipeline, output_dtype, output_ndim, n_outputs):
    """Expect check_dtype_ndim to fail with a data-type mismatch error."""
    check_dtype_ndim(dali_pipeline, output_dtype, output_ndim, n_outputs)
@raises(RuntimeError, glob="Number of dimensions * does not match*")
def check_ndim_with_raise(dali_pipeline, output_dtype, output_ndim, n_outputs):
    """Expect check_dtype_ndim to fail with a dimensionality mismatch error."""
    check_dtype_ndim(dali_pipeline, output_dtype, output_ndim, n_outputs)
@raises(RuntimeError, glob="Lengths * do not match*")
def check_length_error(dali_pipeline, output_dtype, output_ndim, n_outputs):
    """Expect check_dtype_ndim to fail because the constraint list length
    does not match the number of pipeline outputs."""
    check_dtype_ndim(dali_pipeline, output_dtype, output_ndim, n_outputs)
def test_one_output_dtype_ndim():
    """Exercise output_dtype/output_ndim constraints on a single-output pipeline:
    matching values and wildcards pass, mismatches and wrong-length lists raise."""
    @pipeline_def
    def pipe():
        inputs, labels = fn.readers.file(
            file_root=os.path.join(get_dali_extra_path(), 'db', 'single', 'jpeg'), name="Reader")
        decoded = fn.decoders.image(inputs, device="mixed", output_type=types.RGB)
        return decoded
    def create_test_package(output_dtype=None, output_ndim=None):
        # Returns (pipeline, expected dtype constraint, expected ndim constraint).
        return pipe(batch_size=1, num_threads=1, device_id=0, output_dtype=output_dtype,
                    output_ndim=output_ndim), output_dtype, output_ndim
    # The decoded image output is a 3-dim uint8 tensor (HWC).
    both_correct = create_test_package(output_dtype=[types.UINT8], output_ndim=[3])
    ndim_correct_dtype_wildcard = create_test_package(output_dtype=[None], output_ndim=[3])
    dtype_correct_ndim_wildcard = create_test_package(output_dtype=types.UINT8)
    dtype_incorrect = create_test_package(output_dtype=[types.FLOAT], output_ndim=[3])
    ndim_incorrect = create_test_package(output_dtype=types.UINT8, output_ndim=0)
    both_correct_one_list = create_test_package(output_dtype=types.UINT8, output_ndim=[3])
    too_many_dtypes = create_test_package(output_dtype=[types.UINT8, types.FLOAT])
    correct_dtypes_but_too_many = create_test_package(output_dtype=[types.UINT8, types.UINT8])
    correct_ndims_but_too_many = create_test_package(output_ndim=[3, 3])
    all_wildcards = create_test_package()
    correct_test_packages = [
        both_correct, ndim_correct_dtype_wildcard, dtype_correct_ndim_wildcard,
        both_correct_one_list, all_wildcards
    ]
    test_ndim_packages_with_raise = [ndim_incorrect]
    test_dtype_packages_with_raise = [dtype_incorrect]
    test_packages_length_mismatch = [
        too_many_dtypes, correct_dtypes_but_too_many, correct_ndims_but_too_many
    ]
    for pipe_under_test, dtype, ndim in correct_test_packages:
        yield check_dtype_ndim, pipe_under_test, dtype, ndim, 1
    for pipe_under_test, dtype, ndim in test_ndim_packages_with_raise:
        yield check_ndim_with_raise, pipe_under_test, dtype, ndim, 1
    for pipe_under_test, dtype, ndim in test_dtype_packages_with_raise:
        yield check_dtype_with_raise, pipe_under_test, dtype, ndim, 1
    for pipe_under_test, dtype, ndim in test_packages_length_mismatch:
        yield check_length_error, pipe_under_test, dtype, ndim, 1
def test_double_output_dtype_ndim():
    """Check `output_dtype`/`output_ndim` validation for a two-output pipeline.

    Yields (checker, pipeline, dtype, ndim, n_outputs) cases covering correct
    declarations, wildcards, mismatches and length errors, then verifies that
    invalid argument values are rejected at construction time.
    """

    @pipeline_def
    def pipe(cast_labels):
        inputs, labels = fn.readers.file(
            file_root=os.path.join(get_dali_extra_path(), 'db', 'single', 'jpeg'), name="Reader")
        decoded = fn.decoders.image(inputs, device="mixed", output_type=types.RGB)
        labels_casted = fn.cast(labels, dtype=types.UINT8)
        return decoded, labels_casted if cast_labels else labels

    def create_test_package(output_dtype=None, output_ndim=None, cast_labels=False):
        # Returns (pipeline, dtype, ndim); `cast_labels` makes both outputs UINT8
        # so a scalar dtype declaration can broadcast to both of them.
        return pipe(batch_size=1, num_threads=1, device_id=0, output_dtype=output_dtype,
                    output_ndim=output_ndim, cast_labels=cast_labels), output_dtype, output_ndim

    both_correct = create_test_package(output_dtype=[types.UINT8, types.INT32], output_ndim=[3, 1])
    ndim_correct_dtype_wildcard = create_test_package(output_dtype=[None, None], output_ndim=[3, 1])
    dtype_correct_ndim_wildcard = create_test_package(output_dtype=[types.UINT8, types.UINT8],
                                                      cast_labels=True)
    dtype_incorrect = create_test_package(output_dtype=[types.UINT8, types.FLOAT])
    ndim_incorrect = create_test_package(output_ndim=[3, 3])
    dtype_broadcast = create_test_package(output_dtype=types.UINT8, cast_labels=True)
    wildcard_in_dtype = create_test_package(output_dtype=[types.UINT8, None])
    wildcard_in_ndim = create_test_package(output_ndim=[3, None])
    not_enough_dtypes = create_test_package(output_dtype=[types.UINT8])
    not_enough_ndim = create_test_package(output_ndim=[1])
    all_wildcards = create_test_package()
    all_wildcards_but_shapes_dont_match = create_test_package(output_dtype=[None, None],
                                                              output_ndim=[None])

    correct_test_packages = [
        both_correct, ndim_correct_dtype_wildcard, dtype_correct_ndim_wildcard, dtype_broadcast,
        wildcard_in_dtype, wildcard_in_ndim, all_wildcards
    ]
    test_ndim_packages_with_raise = [ndim_incorrect]
    test_dtype_packages_with_raise = [dtype_incorrect]
    test_packages_length_mismatch = [
        not_enough_ndim, not_enough_dtypes, all_wildcards_but_shapes_dont_match
    ]
    for pipe_under_test, dtype, ndim in correct_test_packages:
        yield check_dtype_ndim, pipe_under_test, dtype, ndim, 2
    for pipe_under_test, dtype, ndim in test_ndim_packages_with_raise:
        yield check_ndim_with_raise, pipe_under_test, dtype, ndim, 2
    for pipe_under_test, dtype, ndim in test_dtype_packages_with_raise:
        yield check_dtype_with_raise, pipe_under_test, dtype, ndim, 2
    for pipe_under_test, dtype, ndim in test_packages_length_mismatch:
        yield check_length_error, pipe_under_test, dtype, ndim, 2

    # BUGFIX: previously both negative-ndim calls shared a single assert_raises
    # block; the first call raised and ended the block, so the -2137 case was
    # never executed. Each invalid value now gets its own context manager.
    with assert_raises(ValueError, glob="*must be non-negative*"):
        create_test_package(output_ndim=-1)
    with assert_raises(ValueError, glob="*must be non-negative*"):
        create_test_package(output_ndim=-2137)
    with assert_raises(TypeError, glob="*must be either*"):
        create_test_package(output_dtype=int)
    with assert_raises(ValueError, glob="*types.NO_TYPE*"):
        create_test_package(output_dtype=types.NO_TYPE)
|
DALI-main
|
dali/test/python/test_pipeline.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import nvidia.dali.fn as fn
from nvidia.dali import pipeline_def
from test_utils import get_dali_extra_path
import numpy as np
# Test fixtures: one known-good JPEG from DALI_extra and the batch size used below.
data_root = get_dali_extra_path()
jpeg_file = os.path.join(data_root, 'db', 'single', 'jpeg', '510', 'ship-1083562_640.jpg')
batch_size = 4
def cb(sample_info):
    """External-source callback: return one encoded JPEG and its int32 label."""
    raw_jpeg = np.fromfile(jpeg_file, dtype=np.uint8)
    return raw_jpeg, np.array([1], dtype=np.int32)
@pipeline_def
def simple_pipeline():
    # Parallel external source feeding a CPU image decoder; exercised only for
    # its teardown behaviour (see _test_no_segfault).
    jpegs, labels = fn.external_source(source=cb, num_outputs=2, parallel=True, batch=False)
    images = fn.decoders.image(jpegs, device="cpu")
    return images, labels
def _test_no_segfault(method, workers_num):
    """
    This may cause segmentation fault on Python teardown if shared memory wrappers managed by the
    py_pool are garbage collected before pipeline's backend
    """
    # Build + run once, then let the interpreter exit; the crash (if any)
    # happens during teardown, which is why this runs in a child process.
    pipe = simple_pipeline(
        py_start_method=method, py_num_workers=workers_num,
        batch_size=batch_size, num_threads=4, prefetch_queue_depth=2, device_id=0)
    pipe.build()
    pipe.run()
def test_no_segfault():
    # Each case runs in a separate "spawn"-ed process so that a crash during
    # interpreter teardown (SIGSEGV) is observable via the exit code instead
    # of killing the test runner itself. `method` only selects the pipeline's
    # py_start_method; the launcher context is always "spawn".
    import multiprocessing
    import signal
    for method in ['fork', 'spawn']:
        # Repeat test a few times as garbage collection order failure is subject to race condition
        # and tended to exit properly once in a while
        for _ in range(2):
            for workers_num in range(1, 5):
                mp = multiprocessing.get_context("spawn")
                process = mp.Process(target=_test_no_segfault, args=(method, workers_num))
                process.start()
                process.join()
                if process.exitcode != os.EX_OK:
                    # Negative exitcode means the child was killed by a signal.
                    if signal.SIGSEGV == -process.exitcode:
                        raise RuntimeError("Process terminated with signal SIGSEGV")
                    raise RuntimeError("Process exited with {} code".format(process.exitcode))
|
DALI-main
|
dali/test/python/test_external_source_parallel_garbage_collection_order.py
|
# Copyright (c) 2020, 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
import torch
from nvidia.dali.backend_impl import * # noqa: F401, F403
from nvidia.dali import Pipeline
from torch.utils.dlpack import to_dlpack, from_dlpack
from test_utils import check_output
class TestIterator():
    """Deterministic source of random torch tensors wrapped in DLPack capsules.

    Produces `n` iterations; each iteration yields `batch_size` samples (or a
    single batched tensor when `as_tensor=True`) per entry of `dims`. Both the
    shapes and the values are drawn from RNGs reseeded from the iteration
    index, so two iterators built with the same arguments emit identical data.
    """

    def __init__(self, n, batch_size, dims=[2], as_tensor=False, device="cuda"):
        # NOTE: the mutable default `dims=[2]` is shared between instances; it
        # is never mutated here, so this is safe as long as callers don't append.
        self.batch_size = batch_size
        self.dims = dims
        self.n = n
        self.as_tensor = as_tensor
        self.i = 0
        self.device = device

    def __len__(self):
        return self.n

    def __iter__(self):
        # Hand out a fresh copy so concurrent iterations don't share `i`.
        return TestIterator(self.n, self.batch_size, self.dims, self.as_tensor, self.device)

    def __next__(self):
        # Seed from the *current* position so data only depends on (args, i).
        seed = 12345 * self.i + 4321
        np.random.seed(seed)
        torch.random.manual_seed(seed)

        def make_batch(ndim):
            extent = np.random.randint(1, 10, [ndim]).tolist()
            if self.as_tensor:
                return to_dlpack(torch.rand(size=[self.batch_size] + extent, device=self.device))
            return [to_dlpack(torch.rand(extent, device=self.device))
                    for _ in range(self.batch_size)]

        if self.i >= self.n:
            # Rewind so the iterator is reusable after exhaustion.
            self.i = 0
            raise StopIteration
        self.i += 1
        if isinstance(self.dims, (list, tuple)):
            return [make_batch(d) for d in self.dims]
        return make_batch(self.dims)

    next = __next__  # Python-2-style alias kept for compatibility.
def asnumpy(x, device):
    """Recursively convert DLPack capsules (or lists thereof) to numpy arrays.

    `None` and numpy arrays pass through unchanged; `device` selects whether
    the unpacked torch tensor has to be copied back from the GPU first.
    """
    if x is None:
        return None
    if isinstance(x, list):
        return [asnumpy(elem, device) for elem in x]
    if isinstance(x, np.ndarray):
        return x
    tensor = from_dlpack(x)
    return tensor.numpy() if device == "cpu" else tensor.cpu().numpy()
def run_and_check(pipe, ref_iterable):
    # Drain the pipeline against a fresh iteration of the reference source and
    # compare every batch; the loop ends when the reference raises StopIteration.
    iter_ref = iter(ref_iterable)
    i = 0
    while True:
        try:
            pipe_out = pipe.run()
            data = next(iter_ref)
            data = asnumpy(data, iter_ref.device)
            check_output(pipe_out, data)
            i += 1
        except StopIteration:
            break
    # Ensure we actually consumed the whole reference, not zero iterations.
    assert i == len(ref_iterable)
def _test_iter_setup(use_fn_api, by_name, src_device, gen_device):
    # Verifies feeding external sources from iter_setup(), in both the fn and
    # ops APIs, addressing the inputs either by name or by graph node.
    batch_size = 7

    class IterSetupPipeline(Pipeline):
        def __init__(self, iterator, num_threads, device_id, src_device):
            super().__init__(
                batch_size=iterator.batch_size,
                num_threads=num_threads,
                device_id=device_id)
            self.iterator = iterator
            self._device = src_device

        def define_graph(self):
            if use_fn_api:
                # pass a Torch stream where data is generated
                self.batch_1 = fn.external_source(device=self._device, name="src1",
                                                  cuda_stream=torch.cuda.default_stream())
                self.batch_2 = fn.external_source(device=self._device, name="src2",
                                                  cuda_stream=torch.cuda.default_stream())
            else:
                input_1 = ops.ExternalSource(device=self._device)
                input_2 = ops.ExternalSource(device=self._device)
                self.batch_1 = input_1(name="src1")
                self.batch_2 = input_2(name="src2")
            return [self.batch_1, self.batch_2]

        def iter_setup(self):
            batch_1, batch_2 = next(self.iterator)
            if by_name:
                # pass a Torch stream where data is generated
                self.feed_input("src1", batch_1, cuda_stream=torch.cuda.default_stream())
                self.feed_input("src2", batch_2, cuda_stream=torch.cuda.default_stream())
            else:
                # pass a Torch stream where data is generated
                self.feed_input(self.batch_1, batch_1, cuda_stream=torch.cuda.default_stream())
                self.feed_input(self.batch_2, batch_2, cuda_stream=torch.cuda.default_stream())

    iter_num = 5
    source = TestIterator(n=iter_num, batch_size=batch_size, dims=[2, 3], device=gen_device)
    pipe = IterSetupPipeline(iter(source), 3, 0, src_device)
    pipe.build()
    run_and_check(pipe, source)
def test_iter_setup():
    # nose-style generator: cross product of API flavour, feed-by-name/node,
    # and source/generation devices (16 cases total).
    for use_fn_api in (False, True):
        for by_name in (False, True):
            for src_device in ("cpu", "gpu"):
                for gen_device in ("cpu", "cuda"):
                    yield _test_iter_setup, use_fn_api, by_name, src_device, gen_device
def _test_external_source_callback_torch_stream(src_device, gen_device):
    # Run on a non-default Torch stream to verify that DALI honours the
    # `cuda_stream` argument when copying external-source data.
    with torch.cuda.stream(torch.cuda.Stream()):
        for attempt in range(10):
            t0 = torch.tensor([attempt * 100 + 1.5], dtype=torch.float32, device=gen_device)
            increment = torch.tensor([10], dtype=torch.float32, device=gen_device)
            pipe = Pipeline(1, 3, 0)

            def gen_batch():
                # Advance the shared tensor in place; DALI must observe each update.
                nonlocal t0
                t0 += increment
                return [to_dlpack(t0)]

            pipe.set_outputs(fn.external_source(source=gen_batch, device=src_device,
                                                cuda_stream=torch.cuda.current_stream()))
            pipe.build()
            for i in range(10):
                check_output(pipe.run(),
                             [np.array([attempt * 100 + (i + 1) * 10 + 1.5], dtype=np.float32)])


def test_external_source_callback_torch_stream():
    # Generator test: all combinations of DALI device x data-generation device.
    for src_device in ["cpu", "gpu"]:
        for gen_device in ["cpu", "cuda"]:
            yield _test_external_source_callback_torch_stream, src_device, gen_device
|
DALI-main
|
dali/test/python/test_external_source_pytorch_dlpack.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.backend_impl import TensorGPU, TensorListGPU
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.tensors as tensors
import nvidia.dali.types as types
import numpy as np
from nose_utils import assert_raises, raises
import cupy as cp
from test_utils import py_buffer_from_address
class ExternalSourcePipe(Pipeline):
    # Minimal GPU pipeline that feeds the same `data` batch every iteration;
    # used to materialize numpy arrays as DALI GPU tensors.
    def __init__(self, batch_size, data, use_copy_kernel=False):
        super(ExternalSourcePipe, self).__init__(batch_size, 1, 0)
        self.output = ops.ExternalSource(device="gpu")
        self.data = data
        self.use_copy_kernel = use_copy_kernel

    def define_graph(self):
        self.out = self.output()
        return self.out

    def iter_setup(self):
        self.feed_input(self.out, self.data, use_copy_kernel=self.use_copy_kernel)
def test_tensorlist_getitem_gpu():
    # Indexing a TensorListGPU must return TensorGPU views (not cupy arrays),
    # support negative indices, and raise IndexError out of range.
    arr = np.random.rand(3, 5, 6)
    pipe = ExternalSourcePipe(arr.shape[0], arr)
    pipe.build()
    tensorlist = pipe.run()[0]
    list_of_tensors = [x for x in tensorlist]
    assert type(tensorlist[0]) is not cp.ndarray
    assert type(tensorlist[0]) is TensorGPU
    assert type(tensorlist[-3]) is TensorGPU
    assert len(list_of_tensors) == len(tensorlist)
    # NOTE(review): the message says "TensorListCPU" even though this is a GPU
    # list - presumably the binding shares the error string; confirm before
    # tightening the glob.
    with assert_raises(IndexError, glob="TensorListCPU index out of range"):
        tensorlist[len(tensorlist)]
    with assert_raises(IndexError, glob="TensorListCPU index out of range"):
        tensorlist[-len(tensorlist) - 1]
def test_data_ptr_tensor_gpu():
    # data_ptr() of a single tensor must point at the original sample's bytes.
    arr = np.random.rand(3, 5, 6)
    pipe = ExternalSourcePipe(arr.shape[0], arr)
    pipe.build()
    tensor = pipe.run()[0][0]
    from_tensor = py_buffer_from_address(
        tensor.data_ptr(), tensor.shape(), types.to_numpy_type(tensor.dtype), gpu=True)
    # from_tensor is cupy array, convert arr to cupy as well
    assert cp.allclose(arr[0], from_tensor)


def test_data_ptr_tensor_list_gpu():
    # data_ptr() of the whole list must cover the contiguous batch.
    arr = np.random.rand(3, 5, 6)
    pipe = ExternalSourcePipe(arr.shape[0], arr)
    pipe.build()
    tensor_list = pipe.run()[0]
    tensor = tensor_list.as_tensor()
    from_tensor = py_buffer_from_address(
        tensor_list.data_ptr(), tensor.shape(), types.to_numpy_type(tensor.dtype), gpu=True)
    # from_tensor is cupy array, convert arr to cupy as well
    assert cp.allclose(arr, from_tensor)
def test_cuda_array_interface_tensor_gpu():
    # __cuda_array_interface__ must expose the right pointer, read-write flag,
    # shape and typestr so cupy can wrap the tensor zero-copy.
    arr = np.random.rand(3, 5, 6)
    pipe = ExternalSourcePipe(arr.shape[0], arr)
    pipe.build()
    tensor_list = pipe.run()[0]
    assert tensor_list[0].__cuda_array_interface__['data'][0] == tensor_list[0].data_ptr()
    assert not tensor_list[0].__cuda_array_interface__['data'][1]
    assert np.array_equal(tensor_list[0].__cuda_array_interface__['shape'], tensor_list[0].shape())
    type_str = tensor_list[0].__cuda_array_interface__['typestr']
    dtype = types.to_numpy_type(tensor_list[0].dtype)
    assert np.dtype(type_str) == np.dtype(dtype)
    assert cp.allclose(arr[0], cp.asanyarray(tensor_list[0]))


def test_cuda_array_interface_tensor_gpu_create():
    # cupy can consume a single DALI GPU tensor via the array interface.
    arr = np.random.rand(3, 5, 6)
    pipe = ExternalSourcePipe(arr.shape[0], arr)
    pipe.build()
    tensor_list = pipe.run()[0]
    assert cp.allclose(arr[0], cp.asanyarray(tensor_list[0]))


def test_cuda_array_interface_tensor_list_gpu_create():
    # ...and the whole contiguous batch via as_tensor().
    arr = np.random.rand(3, 5, 6)
    pipe = ExternalSourcePipe(arr.shape[0], arr)
    pipe.build()
    tensor_list = pipe.run()[0]
    assert cp.allclose(arr, cp.asanyarray(tensor_list.as_tensor()))


def test_cuda_array_interface_tensor_gpu_create_copy_kernel():
    # Same as above, but the data was fed with the batched copy kernel.
    arr = np.random.rand(3, 5, 6)
    pipe = ExternalSourcePipe(arr.shape[0], arr, use_copy_kernel=True)
    pipe.build()
    tensor_list = pipe.run()[0]
    assert cp.allclose(arr[0], cp.asanyarray(tensor_list[0]))


def test_cuda_array_interface_tensor_list_gpu_create_copy_kernel():
    arr = np.random.rand(3, 5, 6)
    pipe = ExternalSourcePipe(arr.shape[0], arr, use_copy_kernel=True)
    pipe.build()
    tensor_list = pipe.run()[0]
    assert cp.allclose(arr, cp.asanyarray(tensor_list.as_tensor()))
def test_cuda_array_interface_tensor_gpu_direct_creation():
    # TensorGPU can be built directly from a cupy array (cuda array interface).
    arr = cp.random.rand(3, 5, 6)
    tensor = TensorGPU(arr, "NHWC")
    assert cp.allclose(arr, cp.asanyarray(tensor))


def test_dlpack_tensor_gpu_direct_creation():
    # ...and from a DLPack capsule (layout argument optional).
    arr = cp.random.rand(3, 5, 6)
    tensor = TensorGPU(arr.toDlpack())
    assert cp.allclose(arr, cp.asanyarray(tensor))


def test_cuda_array_interface_tensor_gpu_to_cpu():
    # as_cpu() must produce a host copy equal to the device data.
    arr = cp.random.rand(3, 5, 6)
    tensor = TensorGPU(arr, "NHWC")
    assert np.allclose(arr.get(), tensor.as_cpu())


def test_dlpack_tensor_gpu_to_cpu():
    arr = cp.random.rand(3, 5, 6)
    tensor = TensorGPU(arr.toDlpack(), "NHWC")
    assert np.allclose(arr.get(), tensor.as_cpu())


def test_cuda_array_interface_tensor_gpu_to_cpu_device_id():
    # Explicit device_id overload.
    arr = cp.random.rand(3, 5, 6)
    tensor = TensorGPU(arr, "NHWC", 0)
    assert np.allclose(arr.get(), tensor.as_cpu())
def test_cuda_array_interface_tensor_list_gpu_direct_creation():
    # TensorListGPU from a single batched cupy array.
    arr = cp.random.rand(3, 5, 6)
    tensor_list = TensorListGPU(arr, "NHWC")
    assert cp.allclose(arr, cp.asanyarray(tensor_list.as_tensor()))


def test_cuda_array_interface_tensor_list_gpu_direct_creation_list():
    # TensorListGPU from a python list of arrays adds a leading batch dim of 1.
    arr = cp.random.rand(3, 5, 6)
    tensor_list = TensorListGPU([arr], "NHWC")
    assert cp.allclose(arr.reshape(tuple([1]) + arr.shape), cp.asanyarray(tensor_list.as_tensor()))


def test_dlpack_tensor_list_gpu_direct_creation():
    # Same constructions via DLPack capsules.
    arr = cp.random.rand(3, 5, 6)
    tensor_list = TensorListGPU(arr.toDlpack(), "NHWC")
    assert cp.allclose(arr, cp.asanyarray(tensor_list.as_tensor()))


def test_dlpack_tensor_list_gpu_direct_creation_list():
    arr = cp.random.rand(3, 5, 6)
    tensor_list = TensorListGPU([arr.toDlpack()], "NHWC")
    assert cp.allclose(arr.reshape(tuple([1]) + arr.shape), cp.asanyarray(tensor_list.as_tensor()))


def test_cuda_array_interface_tensor_list_gpu_to_cpu():
    # as_cpu() of a list must round-trip the batch to the host.
    arr = cp.random.rand(3, 5, 6)
    tensor_list = TensorListGPU(arr, "NHWC")
    assert np.allclose(arr.get(), tensor_list.as_cpu().as_tensor())
def test_dlpack_tensor_list_gpu_to_cpu():
    # BUGFIX: despite its name, this test previously compared the GPU-side data
    # only and never called as_cpu(); it now mirrors the cuda-array-interface
    # variant and checks the host round trip of a DLPack-constructed list.
    arr = cp.random.rand(3, 5, 6)
    tensor_list = TensorListGPU(arr.toDlpack(), "NHWC")
    assert np.allclose(arr.get(), tensor_list.as_cpu().as_tensor())
def test_cuda_array_interface_tensor_list_gpu_to_cpu_device_id():
    # Explicit device_id overload of the list constructor.
    arr = cp.random.rand(3, 5, 6)
    tensor_list = TensorListGPU(arr, "NHWC", 0)
    assert np.allclose(arr.get(), tensor_list.as_cpu().as_tensor())
def check_cuda_array_types(t):
    # A single dtype round-trips through the cuda array interface.
    arr = cp.array([[-0.39, 1.5], [-1.5, 0.33]], dtype=t)
    tensor = TensorGPU(arr, "NHWC")
    assert cp.allclose(arr, cp.asanyarray(tensor))


def test_cuda_array_interface_types():
    for t in [cp.bool_, cp.int8, cp.int16, cp.int32, cp.int64, cp.uint8,
              cp.uint16, cp.uint32, cp.uint64, cp.float64, cp.float32, cp.float16]:
        yield check_cuda_array_types, t


def check_dlpack_types(t):
    # A single dtype round-trips through DLPack (no bool - DLPack lacks it here).
    arr = cp.array([[-0.39, 1.5], [-1.5, 0.33]], dtype=t)
    tensor = TensorGPU(arr.toDlpack(), "NHWC")
    assert cp.allclose(arr, cp.asanyarray(tensor))


def test_dlpack_interface_types():
    for t in [cp.int8, cp.int16, cp.int32, cp.int64, cp.uint8,
              cp.uint16, cp.uint32, cp.uint64, cp.float64, cp.float32, cp.float16]:
        yield check_dlpack_types, t
@raises(RuntimeError, glob="Provided object doesn't support cuda array interface protocol.")
def test_cuda_array_interface_tensor_gpu_create_from_numpy():
    # Host numpy arrays must be rejected by the GPU tensor constructor.
    arr = np.random.rand(3, 5, 6)
    TensorGPU(arr, "NHWC")
@raises(RuntimeError, glob="Provided object doesn't support cuda array interface protocol.")
def test_cuda_array_interface_tensor_list_gpu_create_from_numpy():
    # BUGFIX: this test previously constructed TensorGPU, duplicating the test
    # above and never exercising the TensorListGPU rejection path its name
    # promises. A host numpy array must be rejected by the list constructor too.
    arr = np.random.rand(3, 5, 6)
    TensorListGPU(arr, "NHWC")
def test_tensor_gpu_squeeze():
    # squeeze(dim) must drop unit extents, trim the layout accordingly,
    # preserve the data, and report whether anything was actually removed.
    def check_squeeze(shape, dim, in_layout, expected_out_layout):
        arr = cp.random.rand(*shape)
        t = TensorGPU(arr, in_layout)
        is_squeezed = t.squeeze(dim)
        should_squeeze = (len(expected_out_layout) < len(in_layout))
        arr_squeeze = arr.squeeze(dim)
        t_shape = tuple(t.shape())
        assert t_shape == arr_squeeze.shape, f"{t_shape} != {arr_squeeze.shape}"
        assert t.layout() == expected_out_layout, f"{t.layout()} != {expected_out_layout}"
        assert cp.allclose(arr_squeeze, cp.asanyarray(t))
        assert is_squeezed == should_squeeze, f"{is_squeezed} != {should_squeeze}"

    # (dim=None squeezes every unit extent; a specific dim squeezes only that axis)
    for dim, shape, in_layout, expected_out_layout in \
        [(None, (3, 5, 6), "ABC", "ABC"),
         (None, (3, 1, 6), "ABC", "AC"),
         (1, (3, 1, 6), "ABC", "AC"),
         (-2, (3, 1, 6), "ABC", "AC"),
         (None, (1, 1, 6), "ABC", "C"),
         (1, (1, 1, 6), "ABC", "AC"),
         (None, (1, 1, 1), "ABC", ""),
         (None, (1, 5, 1), "ABC", "B"),
         (-1, (1, 5, 1), "ABC", "AB"),
         (0, (1, 5, 1), "ABC", "BC"),
         (None, (3, 5, 1), "ABC", "AB")]:
        yield check_squeeze, shape, dim, in_layout, expected_out_layout
# Those tests verify that the Tensor[List]Cpu/Gpu created in Python in a similar fashion
# to how ExternalSource for samples operates keep the data alive.
# The Tensor[List] take the pointer to data and store the reference to buffer/object that owns
# the data to keep the refcount positive while the Tensor[List] lives.
# Without this behaviour there was observable bug with creating several temporary
# buffers in the loop and DALI not tracking references to them
def test_tensor_cpu_from_numpy():
    # Temporary numpy buffers must be kept alive by the TensorCPU wrapping them.
    def create_tmp(idx):
        a = np.full((4, 4), idx)
        return tensors.TensorCPU(a, "")
    out = [create_tmp(i) for i in range(4)]
    for i, t in enumerate(out):
        np.testing.assert_array_equal(np.array(t), np.full((4, 4), i))


def test_tensor_list_cpu_from_numpy():
    # Same ownership guarantee for TensorListCPU.
    def create_tmp(idx):
        a = np.full((4, 4), idx)
        return tensors.TensorListCPU(a, "")
    out = [create_tmp(i) for i in range(4)]
    for i, tl in enumerate(out):
        np.testing.assert_array_equal(tl.as_array(), np.full((4, 4), i))


def test_tensor_from_tensor_list_cpu():
    # Tensors sliced out of a TensorListCPU must keep the list's storage alive
    # even after the list itself goes out of scope.
    def create_tl(idx):
        a = np.full((3, 4), idx)
        return tensors.TensorListCPU(a, "")
    out = []
    for i in range(5):
        ts = [t for t in create_tl(i)]
        out += ts
    for i, t in enumerate(out):
        np.testing.assert_array_equal(np.array(t), np.full((4,), i // 3))
def test_tensor_gpu_from_cupy():
    # Temporary cupy buffers must be kept alive by the TensorGPU wrapping them.
    def create_tmp(idx):
        a = np.full((4, 4), idx)
        a_gpu = cp.array(a, dtype=a.dtype)
        return tensors.TensorGPU(a_gpu, "")
    out = [create_tmp(i) for i in range(4)]
    for i, t in enumerate(out):
        np.testing.assert_array_equal(np.array(t.as_cpu()), np.full((4, 4), i))


def test_tensor_list_gpu_from_cupy():
    # Same ownership guarantee for TensorListGPU; check per-sample and batched.
    def create_tmp(idx):
        a = np.full((4, 4), idx)
        a_gpu = cp.array(a, dtype=a.dtype)
        return tensors.TensorListGPU(a_gpu, "")
    out = [create_tmp(i) for i in range(4)]
    for i, tl in enumerate(out):
        for j in range(4):
            np.testing.assert_array_equal(np.array(tl[j].as_cpu()), np.full(tl[j].shape(), i))
        np.testing.assert_array_equal(tl.as_cpu().as_array(), np.full((4, 4), i))


def test_tensor_from_tensor_list_gpu():
    # Tensors sliced out of a TensorListGPU must keep the list's storage alive.
    def create_tl(idx):
        a = np.full((3, 4), idx)
        a_gpu = cp.array(a, dtype=a.dtype)
        return tensors.TensorListGPU(a_gpu, "")
    out = []
    for i in range(5):
        ts = [t for t in create_tl(i)]
        out += ts
    for i, t in enumerate(out):
        np.testing.assert_array_equal(np.array(t.as_cpu()), np.full((4,), i // 3))


def test_tensor_expose_dlpack_capsule():
    # The exposed DLPack capsule must be consumable by cupy without a copy.
    arr = cp.arange(20)
    tensor = TensorGPU(arr, "NHWC")
    capsule = tensor._expose_dlpack_capsule()
    arr_from_dlpack = cp.from_dlpack(capsule)
    assert cp.array_equal(arr, arr_from_dlpack)
|
DALI-main
|
dali/test/python/test_backend_impl_gpu.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
from test_utils import get_gpu_num
from test_utils import get_dali_extra_path
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.pipeline import Pipeline, pipeline_def
import nvidia.dali.fn as fn
import numpy as np
import tempfile
from nose_utils import assert_raises, raises
# Locations of the test videos. The /tmp directories are populated by the test
# environment before this module is imported (presumably CI setup scripts -
# TODO confirm); DALI_extra provides the container samples.
VIDEO_DIRECTORY = "/tmp/video_files"
PLENTY_VIDEO_DIRECTORY = "/tmp/many_video_files"
VIDEO_FILES = os.listdir(VIDEO_DIRECTORY)
PLENTY_VIDEO_FILES = os.listdir(PLENTY_VIDEO_DIRECTORY)
VIDEO_FILES = [VIDEO_DIRECTORY + '/' + f for f in VIDEO_FILES]
PLENTY_VIDEO_FILES = [PLENTY_VIDEO_DIRECTORY + '/' + f for f in PLENTY_VIDEO_FILES]
FILE_LIST = "/tmp/file_list.txt"
MUTLIPLE_RESOLUTION_ROOT = '/tmp/video_resolution/'
test_data_root = get_dali_extra_path()
video_data_root = os.path.join(test_data_root, 'db', 'video')
corrupted_video_data_root = os.path.join(video_data_root, 'corrupted')
video_containers_data_root = os.path.join(test_data_root, 'db', 'video', 'containers')
video_types = ['avi', 'mov', 'mkv', 'mpeg']
# Iterations / batch size / frames-per-sequence used throughout this module.
ITER = 6
BATCH_SIZE = 4
COUNT = 5
class VideoPipe(Pipeline):
    # Reads fixed-length normalized YCbCr sequences from an explicit file list.
    def __init__(self, batch_size, data, shuffle=False, stride=1, step=-1, device_id=0,
                 num_shards=1, dtype=types.FLOAT, sequence_length=COUNT):
        super().__init__(batch_size, num_threads=2, device_id=device_id, seed=12)
        self.input = ops.readers.Video(device="gpu", filenames=data,
                                       sequence_length=sequence_length,
                                       shard_id=0, num_shards=num_shards,
                                       random_shuffle=shuffle,
                                       normalized=True,
                                       image_type=types.YCbCr,
                                       dtype=dtype, step=step,
                                       stride=stride)

    def define_graph(self):
        output = self.input(name="Reader")
        return output


class VideoPipeList(Pipeline):
    # Reads sequences driven by a file-list text file (path label [start [end]]).
    def __init__(self, batch_size, data, device_id=0, sequence_length=COUNT, step=-1, stride=1,
                 file_list_frame_num=True, file_list_include_preceding_frame=False,
                 skip_vfr_check=False):
        super().__init__(batch_size, num_threads=2, device_id=device_id)
        self.input = ops.readers.Video(
            device="gpu", file_list=data, sequence_length=sequence_length, step=step,
            stride=stride, file_list_frame_num=file_list_frame_num,
            file_list_include_preceding_frame=file_list_include_preceding_frame,
            skip_vfr_check=skip_vfr_check)

    def define_graph(self):
        output = self.input(name="Reader")
        return output


class VideoPipeRoot(Pipeline):
    # Reads sequences from a directory tree (one subdirectory per label).
    def __init__(self, batch_size, data, device_id=0, sequence_length=COUNT):
        super().__init__(batch_size, num_threads=2, device_id=device_id)
        self.input = ops.readers.Video(
            device="gpu", file_root=data, sequence_length=sequence_length, random_shuffle=True)

    def define_graph(self):
        output = self.input(name="Reader")
        return output
def test_simple_videopipeline():
    # Smoke test: sequences come out in FHWC layout.
    pipe = VideoPipe(batch_size=BATCH_SIZE, data=VIDEO_FILES)
    pipe.build()
    for i in range(ITER):
        print("Iter " + str(i))
        out = pipe.run()
        assert out[0].layout() == "FHWC"
    del pipe


def test_wrong_length_sequence_videopipeline():
    # A sequence longer than any input video must be rejected at build time.
    pipe = VideoPipe(batch_size=BATCH_SIZE, data=VIDEO_FILES, sequence_length=100000)
    with assert_raises(RuntimeError, glob='There are no valid sequences in the provided dataset'):
        pipe.build()


def check_videopipeline_supported_type(dtype):
    # A supported dtype builds and runs without error.
    pipe = VideoPipe(batch_size=BATCH_SIZE, data=VIDEO_FILES, dtype=dtype)
    pipe.build()
    for i in range(ITER):
        print("Iter " + str(i))
        _ = pipe.run()
    del pipe


@raises(RuntimeError, glob='Data type must be FLOAT or UINT8')
def check_videopipeline_unsupported_type(dtype):
    # Any other dtype must raise during build.
    pipe = VideoPipe(batch_size=BATCH_SIZE, data=VIDEO_FILES, dtype=dtype)
    pipe.build()


# The reader supports only these element types; everything else must raise.
SUPPORTED_TYPES = [types.DALIDataType.FLOAT, types.DALIDataType.UINT8]
ALL_TYPES = list(types.DALIDataType.__members__.values())


def test_simple_videopipeline_supported_types():
    for type in SUPPORTED_TYPES:
        yield check_videopipeline_supported_type, type


def test_simple_videopipeline_not_supported_types():
    for type in set(ALL_TYPES) - set(SUPPORTED_TYPES):
        yield check_videopipeline_unsupported_type, type


def test_file_list_videopipeline():
    # Smoke test of the file-list driven reader.
    pipe = VideoPipeList(batch_size=BATCH_SIZE, data=FILE_LIST)
    pipe.build()
    for i in range(ITER):
        print("Iter " + str(i))
        _ = pipe.run()
    del pipe
def _test_file_list_starts_videopipeline(start, end):
    # Verify that [start, end) frame ranges in the file list shrink the epoch
    # size as expected; negative values count from the end of the video.
    files = sorted(os.listdir(VIDEO_DIRECTORY))
    # First pass without a range to learn the full epoch size.
    list_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
    list_file.write("{} {}\n".format(os.path.join(VIDEO_DIRECTORY, files[0]), 0))
    list_file.close()
    pipe = VideoPipeList(batch_size=BATCH_SIZE, data=list_file.name, sequence_length=1)
    pipe.build()
    reference_seq_num = pipe.reader_meta("Reader")["epoch_size"]
    del pipe
    os.remove(list_file.name)
    # Second pass with the range under test.
    list_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
    if end is None:
        list_file.write("{} {} {}\n".format(os.path.join(VIDEO_DIRECTORY, files[0]), 0, start))
    else:
        list_file.write(
            "{} {} {} {}\n".format(os.path.join(VIDEO_DIRECTORY, files[0]), 0, start, end))
    list_file.close()
    pipe = VideoPipeList(batch_size=BATCH_SIZE, data=list_file.name, sequence_length=1)
    pipe.build()
    seq_num = pipe.reader_meta("Reader")["epoch_size"]
    # Derive the expected count from the full epoch size and the range semantics.
    expected_seq_num = reference_seq_num
    if start > 0:
        expected_seq_num -= start
    elif start < 0:
        expected_seq_num = -start
    if end is not None:
        if end > 0:
            expected_seq_num -= (reference_seq_num - end)
        elif end < 0:
            expected_seq_num += end
    assert expected_seq_num == seq_num, "Reference is {}, expected is {}, obtained {}".format(
        reference_seq_num, expected_seq_num, seq_num)
    os.remove(list_file.name)


def test_file_list_starts_ends_videopipeline():
    ranges = [
        [0, None],
        [1, None],
        [0, -1],
        [2, None],
        [0, -2],
        [0, 1],
        [-1, None],
        [-3, -1]
    ]
    for r in ranges:
        yield _test_file_list_starts_videopipeline, r[0], r[1]
def test_source_info():
    # Every decoded sample must carry the path of the video it came from.
    files = []
    for cont in video_types:
        path = os.path.join(video_containers_data_root, cont)
        files += [path + '/' + f for f in os.listdir(path)]
    files = sorted(files)
    list_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
    for f in files:
        list_file.write("{} {} {} {}\n".format(f, 0, 0, 1))
    list_file.close()
    pipe = VideoPipeList(batch_size=BATCH_SIZE, data=list_file.name, sequence_length=1,
                         skip_vfr_check=True)
    pipe.build()
    samples_read = 0
    while samples_read < len(files):
        o = pipe.run()
        for idx, t in enumerate(o[0]):
            assert t.source_info() == files[(samples_read + idx) % len(files)]
        samples_read += BATCH_SIZE
    os.remove(list_file.name)


def _create_file_list_include_preceding_frame_pipe(file_list_include_preceding_frame):
    # Build a pipeline whose file list selects a sub-frame time window; with
    # the flag set, the frame preceding the window start is included.
    files = sorted(os.listdir(VIDEO_DIRECTORY))
    list_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
    # make sure that this is close enough to show only one frame
    list_file.write(
        "{} {} {} {}\n".format(os.path.join(VIDEO_DIRECTORY, files[0]), 0, 0.111, 0.112))
    list_file.close()
    pipe = VideoPipeList(
        batch_size=BATCH_SIZE, data=list_file.name, sequence_length=1, file_list_frame_num=False,
        file_list_include_preceding_frame=file_list_include_preceding_frame)
    return pipe, list_file.name


def test_file_list_include_preceding_frame():
    pipe, list_file_name = _create_file_list_include_preceding_frame_pipe(True)
    pipe.build()
    os.remove(list_file_name)
    for _ in range(3):
        pipe.run()
    seq_num = pipe.reader_meta("Reader")["epoch_size"]
    assert seq_num == 1, "Expected to get only 1 sample, received {}".format(seq_num)


def test_file_list_include_preceding_frame_fail():
    pipe, list_file_name = _create_file_list_include_preceding_frame_pipe(False)
    # there should be no valid sequences
    expected_msg = "Start time number should be lesser or equal to end time for a file"
    with assert_raises(RuntimeError, glob=expected_msg):
        pipe.build()
    os.remove(list_file_name)
def _test_file_list_invalid_range(start, end):
files = sorted(os.listdir(VIDEO_DIRECTORY))
list_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
if end is None:
list_file.write("{} {} {}\n".format(os.path.join(VIDEO_DIRECTORY, files[0]), 0, start))
else:
list_file.write(
"{} {} {} {}\n".format(os.path.join(VIDEO_DIRECTORY, files[0]), 0, start, end))
list_file.close()
pipe = VideoPipeList(batch_size=BATCH_SIZE, data=list_file.name)
expected_msg = "Start frame number should be lesser or equal to end frame number for a file"
with assert_raises(RuntimeError, glob=expected_msg):
pipe.build()
os.remove(list_file.name)
def test_file_list_invalid_range():
    """Exercise several invalid (start, end) frame ranges."""
    for start, end in [(-1, 1), (1000000, None), (0, -1000)]:
        yield _test_file_list_invalid_range, start, end
def test_file_list_empty_range():
    """A [10, 10) frame range selects no frames, so the reader must report
    that no files were read."""
    first_video = sorted(os.listdir(VIDEO_DIRECTORY))[0]
    list_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
    list_file.write("{} {} {} {}\n".format(os.path.join(VIDEO_DIRECTORY, first_video), 0, 10, 10))
    list_file.close()
    pipe = VideoPipeList(batch_size=BATCH_SIZE, data=list_file.name)
    with assert_raises(RuntimeError, glob='No files were read'):
        pipe.build()
    os.remove(list_file.name)
def test_step_video_pipeline():
    """Smoke-test the video reader with step=1 (a sequence starts at every frame)."""
    pipe = VideoPipe(batch_size=BATCH_SIZE, data=VIDEO_FILES, step=1)
    pipe.build()
    for it in range(ITER):
        print("Iter " + str(it))
        pipe.run()
    del pipe
def test_stride_video_pipeline():
    """Smoke-test the video reader with a frame stride of 3."""
    pipe = VideoPipe(batch_size=BATCH_SIZE, data=VIDEO_FILES, stride=3)
    pipe.build()
    for it in range(ITER):
        print("Iter " + str(it))
        pipe.run()
    del pipe
def test_multiple_resolution_videopipeline():
    """Decode videos of different resolutions; skip gracefully when the
    decoder cannot be reconfigured on this platform."""
    pipe = VideoPipeRoot(batch_size=BATCH_SIZE, data=MUTLIPLE_RESOLUTION_ROOT)
    try:
        pipe.build()
        for it in range(ITER):
            print("Iter " + str(it))
            pipe.run()
    except Exception as e:
        if str(e) != "Decoder reconfigure feature not supported":
            raise
        print("Multiple resolution test skipped")
    del pipe
def test_multi_gpu_video_pipeline():
    """Run one shard of the video reader on every available GPU."""
    gpus = get_gpu_num()
    pipes = [VideoPipe(batch_size=BATCH_SIZE, data=VIDEO_FILES, device_id=device,
                       num_shards=gpus)
             for device in range(gpus)]
    for pipe in pipes:
        pipe.build()
        pipe.run()
def test_plenty_of_video_files():
    """
    checks if the readers.Video can handle more than OS max open file limit of opened files at once
    """
    # a huge step guarantees exactly one sequence start per video file
    pipe = VideoPipe(
        batch_size=BATCH_SIZE, data=PLENTY_VIDEO_FILES, step=1000000, sequence_length=1)
    pipe.build()
    batches = math.ceil(len(os.listdir(PLENTY_VIDEO_DIRECTORY)) / BATCH_SIZE)
    for it in range(batches):
        print("Iter " + str(it))
        pipe.run()
@raises(RuntimeError,
        glob='Could not open file * because of Invalid data found when processing input')
def check_corrupted_videos():
    """Building a reader over any corrupted video must raise RuntimeError
    (asserted by the @raises decorator)."""
    for name in os.listdir(corrupted_video_data_root):
        corrupted = corrupted_video_data_root + '/' + name
        pipe = Pipeline(batch_size=BATCH_SIZE, num_threads=4, device_id=0)
        with pipe:
            pipe.set_outputs(
                fn.readers.video(device="gpu", filenames=corrupted, sequence_length=1))
        pipe.build()
def test_corrupted_videos():
    # nose entry point; the @raises decorator on check_corrupted_videos
    # performs the actual assertion that a RuntimeError is raised
    check_corrupted_videos()
def check_container(cont):
    """Decode a full epoch of every test video stored in container format ``cont``."""
    path = os.path.join(video_containers_data_root, cont)
    test_videos = [path + '/' + f for f in os.listdir(path)]
    pipe = Pipeline(batch_size=1, num_threads=4, device_id=0)
    with pipe:
        # mkv container for some reason fails in DALI VFR heuristics
        vid = fn.readers.video(device="gpu", filenames=test_videos, sequence_length=10,
                               skip_vfr_check=True, stride=1, name="Reader")
        pipe.set_outputs(vid)
    pipe.build()
    for _ in range(pipe.reader_meta("Reader")["epoch_size"]):
        pipe.run()
def test_container():
    """Run check_container for every supported video container type."""
    yield from ((check_container, cont) for cont in video_types)
def test_pad_sequence():
    """Verify sequence padding in the video reader.

    Steps:
      1. Measure the total frame count of the test video.
      2. Choose a step so the second sequence can only be partially filled,
         and check that - with pad_sequences=True - the padded frames are
         zeros, padded timestamps are -1 and the frame number equals ``step``.
      3. With pad_sequences=False the partial sequence must be dropped.
      4. Re-read the video without padding and compare the non-padded frames
         of the padded pipeline against the reference, matched by timestamp.
    """
    def get_epoch_size(pipe):
        # epoch size of the only reader in the pipeline
        meta = pipe.reader_meta()
        return list(meta.values())[0]['epoch_size']

    @pipeline_def(batch_size=1, num_threads=4, device_id=0)
    def create_video_pipe(filenames, sequence_length=1, stride=1, step=-1, pad_sequences=False):
        fr, lab, fr_num, time_stamp = fn.readers.video(
            device="gpu", filenames=filenames, labels=[], sequence_length=sequence_length,
            shard_id=0, num_shards=1, enable_timestamps=True, enable_frame_num=True,
            random_shuffle=False, skip_vfr_check=True, step=step, stride=stride,
            pad_last_batch=True, pad_sequences=pad_sequences)
        return fr, lab, fr_num, time_stamp

    video_filename = [
        os.path.join(video_data_root, 'sintel', 'video_files', 'sintel_trailer-720p_2.mp4')
    ]
    # with sequence_length=1 and stride=1 the epoch size equals the frame count
    dali_pipe = create_video_pipe(video_filename)
    dali_pipe.build()
    total_number_of_frames = get_epoch_size(dali_pipe)
    sequence_length = 4
    stride = sequence_length // 2
    batch_size = 2
    # second sequence should have only half of the frames
    step = total_number_of_frames - (stride * sequence_length // 2 - 1)
    dali_pipe = create_video_pipe(
        batch_size=batch_size, filenames=video_filename, sequence_length=sequence_length,
        stride=stride, step=step, pad_sequences=True)
    dali_pipe.build()
    assert get_epoch_size(dali_pipe) == 2
    # number of real (non-padded) frames in the last, partial sequence
    last_sample_frame_count = 1 + (total_number_of_frames - 1 - step) // stride
    assert last_sample_frame_count < sequence_length
    out = dali_pipe.run()
    padded_sampl = 1
    # check padded sample
    # non padded frames should not be 0
    assert np.any(np.array(out[0].as_cpu()[padded_sampl])[0:last_sample_frame_count]) != 0
    # while padded one only 0
    assert np.all(np.array(out[0].as_cpu()[padded_sampl])[last_sample_frame_count + 1:]) == 0
    assert np.array(out[2].as_cpu()[padded_sampl]) == step
    # non padded samples should have non negative timestamps
    non_padded = np.array(out[3].as_cpu()[padded_sampl])[0:last_sample_frame_count]
    assert np.all(non_padded != np.array([-1] * last_sample_frame_count))
    # while padded one only -1
    padded = np.array(out[3].as_cpu()[padded_sampl])[last_sample_frame_count + 1:]
    assert np.all(padded == np.array([-1] * (sequence_length - last_sample_frame_count)))
    dali_pipe = create_video_pipe(
        batch_size=2, filenames=video_filename, sequence_length=sequence_length,
        stride=stride, step=step, pad_sequences=False)
    dali_pipe.build()
    # when sequence padding if off we should get only one valid sample in the epoch
    assert get_epoch_size(dali_pipe) == 1

    def divisor_generator(n, max_val):
        # largest divisor of n that is <= max_val + 1 (scanning downwards)
        for i in range(max_val + 1, 1, -1):
            if n % i == 0:
                return i

    dali_pipe = create_video_pipe(batch_size=1, filenames=video_filename, sequence_length=1,
                                  stride=1, pad_sequences=False)
    dali_pipe.build()
    # to speed things up read as close as 30 frames at the time, but in the way that the sequences
    # cover the whole video (without padding)
    ref_sequence_length = divisor_generator(get_epoch_size(dali_pipe), 30)
    # extract frames from the test video without padding and compare with one from padded pipeline
    dali_pipe = create_video_pipe(
        batch_size=1, filenames=video_filename, sequence_length=ref_sequence_length,
        stride=1, pad_sequences=False)
    dali_pipe.build()
    # ts_index/sampl_idx walk over the padded batch captured in `out` above
    ts_index = 0
    sampl_idx = 0
    for _ in range(get_epoch_size(dali_pipe)):
        ref_out = dali_pipe.run()
        # run over all frame timestamps and compare them with one from the tested pipeline
        for ref_idx in range(ref_sequence_length):
            # if we get into padded samples break
            if np.array(out[3].as_cpu()[sampl_idx])[ts_index] == -1:
                break
            # if there is a match compare frames itself and move to next timestamp/sample
            # from the tested batch
            sample_stamp = np.array(out[3].as_cpu()[sampl_idx])
            ref_sample_stamp = np.array(ref_out[3].as_cpu()[0])
            if sample_stamp[ts_index] == ref_sample_stamp[ref_idx]:
                sample = np.array(out[0].as_cpu()[sampl_idx])
                sample_ref = np.array(ref_out[0].as_cpu()[0])
                assert np.all(sample[ts_index] == sample_ref[ref_idx])
                ts_index += 1
                if ts_index == sequence_length:
                    ts_index = 0
                    sampl_idx += 1
                # it should break earlier and not get here at all, as we expect to have padded
                # sample in the tested pipeline
                if sampl_idx == batch_size:
                    assert False
    assert sampl_idx == padded_sampl
    assert ts_index == last_sample_frame_count
|
DALI-main
|
dali/test/python/test_video_pipeline.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali._utils import external_source_impl
from nvidia.dali import tensors, pipeline_def
import nvidia.dali.fn as fn
from nose.tools import assert_equals
from nose_utils import raises
from nose.plugins.attrib import attr
import numpy as np
def passes_assert(callback, sample):
    """Assert that ``callback`` accepts ``sample`` (i.e. returns True)."""
    result = callback(sample)
    assert_equals(result, True)
def converts(callback, sample, baseline):
    """Assert that ``callback`` converts ``sample`` into ``baseline``."""
    converted = callback(sample)
    np.testing.assert_array_equal(converted, baseline)
# 2x2 uint8 reference sample shared by the container-conversion tests below
test_array = np.array([[42, 42], [42, 42]], dtype=np.uint8)
def run_checks(samples_allowed, batches_allowed, samples_disallowed, batches_disallowed):
    """Yield nose test cases checking which inputs external_source accepts.

    Allowed samples must pass both the sample- and batch-level checks and
    convert correctly; allowed batches only the batch-level ones.
    Disallowed inputs must raise TypeError from the assertion helpers.
    """
    for sample, baseline in samples_allowed:
        yield passes_assert, external_source_impl.assert_cpu_sample_data_type, sample
        yield converts, external_source_impl.sample_to_numpy, sample, baseline
    batch_cases = samples_allowed + batches_allowed
    for sample, baseline in batch_cases:
        yield passes_assert, external_source_impl.assert_cpu_batch_data_type, sample
        yield converts, external_source_impl.batch_to_numpy, sample, baseline
    sample_rejects = raises(TypeError, "Unsupported callback return type.")(
        external_source_impl.assert_cpu_sample_data_type)
    for sample in samples_disallowed:
        yield sample_rejects, sample
    batch_rejects = raises(TypeError, "Unsupported callback return type")(
        external_source_impl.assert_cpu_batch_data_type)
    for sample in samples_disallowed + batches_disallowed:
        yield batch_rejects, sample
def non_uniform_tl():
    """Return a TensorListCPU whose two samples have different shapes."""
    @pipeline_def(batch_size=2, num_threads=4, device_id=0)
    def pipe():
        return fn.external_source(
            source=lambda: [np.array([42, 42]), np.array([1, 2, 3])])

    p = pipe()
    p.build()
    return p.run()[0]
def test_regular_containers():
    """Plain numpy arrays and DALI CPU tensors are valid samples/batches."""
    as_sample = [(test_array, test_array),
                 (tensors.TensorCPU(test_array), test_array)]
    as_batch = [
        ([test_array], [test_array]),
        ([test_array] * 4, [test_array] * 4),
        ([tensors.TensorCPU(test_array)], [test_array]),
        ([tensors.TensorCPU(test_array)] * 4, [test_array] * 4),
        (tensors.TensorListCPU(test_array), test_array),
    ]
    yield from run_checks(as_sample, as_batch, [], [])
def test_non_uniform_batch():
    """Batches whose samples differ in shape must be rejected."""
    rejected = raises(ValueError, "Uniform input is required (batch of tensors of equal shapes)")(
        external_source_impl.batch_to_numpy)
    for batch in [[test_array, np.array([[42, 42]], dtype=np.uint8)], non_uniform_tl()]:
        yield rejected, batch
@attr('pytorch')
def test_pytorch_containers():
    """CPU torch tensors are accepted; CUDA ones must be rejected."""
    import torch
    cpu_samples = [(torch.tensor(test_array), test_array)]
    cpu_batches = [([torch.tensor(test_array)], [test_array]),
                   ([torch.tensor(test_array)] * 4, [test_array] * 4)]
    gpu_samples = [torch.tensor(test_array).cuda()]
    yield from run_checks(cpu_samples, cpu_batches, gpu_samples, [])
@attr('mxnet')
def test_mxnet_containers():
    """CPU MXNet NDArrays are accepted; GPU-context ones must be rejected."""
    import mxnet as mx
    cpu_samples = [(mx.nd.array(test_array), test_array)]
    cpu_batches = [([mx.nd.array(test_array)], [test_array]),
                   ([mx.nd.array(test_array)] * 4, [test_array] * 4)]
    gpu_samples = [mx.nd.array(test_array, ctx=mx.gpu(0))]
    yield from run_checks(cpu_samples, cpu_batches, gpu_samples, [])
@attr('cupy')
def test_cupy_containers():
    """GPU-backed cupy arrays and DALI GPU tensors must all be rejected."""
    import cupy as cp
    gpu_array = cp.array([[42, 42], [42, 42]], dtype=cp.uint8)
    bad_samples = [gpu_array, tensors.TensorGPU(gpu_array)]
    bad_batches = [tensors.TensorListGPU(gpu_array)]
    yield from run_checks([], [], bad_samples, bad_batches)
|
DALI-main
|
dali/test/python/test_external_source_impl_utils.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# it is enough to just import all functions from test_internals_operator_external_source
# nose will query for the methods available and will run them
# the test_internals_operator_external_source is 99% the same for cupy and numpy tests
# so it is better to store everything in one file and just call `use_cupy` to switch
# between the default numpy and cupy
from test_external_source_impl import * # noqa: F401 F403
|
DALI-main
|
dali/test/python/test_external_source_numpy.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
from nvidia.dali.pipeline.experimental import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import nvidia.dali.plugin.tf as dali_tf
from nose.tools import with_setup
from test_utils_tensorflow import skip_inputs_for_incompatible_tf
@with_setup(skip_inputs_for_incompatible_tf)
def test_both_tf_and_dali_conditionals():
    """Check that DALI conditionals compose with TF autograph conditionals.

    The DALI pipeline emits a batch of -1 on even iterations and +1 on odd
    ones; the TF function counts the batches with a negative and with a
    positive sum over 5 iterations (3 negative: 0, 2, 4; 2 positive: 1, 3).
    """
    @pipeline_def(enable_conditionals=True, batch_size=5, num_threads=4, device_id=0)
    def dali_conditional_pipeline():
        iter_id = fn.external_source(source=lambda x: np.array(x.iteration), batch=False)
        if iter_id & 1 == 0:
            output = types.Constant(np.array(-1), device="cpu")
        else:
            output = types.Constant(np.array(1), device="cpu")
        return output

    with tf.device("/cpu:0"):
        dali_dataset = dali_tf.experimental.DALIDatasetWithInputs(
            pipeline=dali_conditional_pipeline(),
            batch_size=5,
            output_shapes=(5, ),
            output_dtypes=(tf.int32),
            num_threads=4,
            device_id=0,
        )

    @tf.function
    def tf_function_with_conditionals(dali_dataset):
        negative = tf.constant(0)
        positive = tf.constant(0)
        for input in dali_dataset:
            if tf.reduce_sum(input) < 0:
                negative = negative + 1
            else:
                positive = positive + 1
        return negative, positive

    # FIX: the function returns (negative, positive); the original unpacked it
    # as (pos, neg), so each name held the other counter and the assertions
    # only passed because they encoded the same swap.
    neg, pos = tf_function_with_conditionals(dali_dataset.take(5))
    assert neg == 3
    assert pos == 2
|
DALI-main
|
dali/test/python/test_dali_tf_conditionals.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import multiprocessing
import socket
from contextlib import closing, contextmanager
import numpy as np
from nvidia.dali._multiproc.shared_batch import BufShmChunk, SharedBatchWriter, \
SharedBatchMeta, deserialize_batch
from nvidia.dali._multiproc.shared_queue import ShmQueue
from nvidia.dali._multiproc.messages import ShmMessageDesc
from test_utils import RandomlyShapedDataIterator
from nose_utils import raises
def check_serialize_deserialize(batch):
    """Round-trip ``batch`` through a shared-memory chunk and compare the
    deserialized arrays with the originals element-wise."""
    shm_chunk = BufShmChunk.allocate("chunk_0", 100)
    with closing(shm_chunk) as shm_chunk:
        writer = SharedBatchWriter(shm_chunk, batch)
        batch_meta = SharedBatchMeta.from_writer(writer)
        restored = deserialize_batch(shm_chunk, batch_meta)
        assert len(batch) == len(restored), "Lengths before and after should be the same"
        # compare while the shared memory is still mapped
        for before, after in zip(batch, restored):
            np.testing.assert_array_equal(before, after)
def test_serialize_deserialize():
    """Round-trip batches of various shapes and dtypes through shared memory.

    FIX: ``np.float`` (a deprecated alias of the builtin ``float``) was
    removed in NumPy 1.24; use the equivalent canonical dtype ``np.float64``.
    """
    for shapes in [[(10)], [(10, 20)], [(10, 20, 3)], [(1), (2)], [(2), (2, 3)],
                   [(2, 3, 4), (2, 3, 5), (3, 4, 5)], []]:
        for dtype in [np.int8, np.float64, np.int32]:
            yield check_serialize_deserialize, [np.full(s, 42, dtype=dtype) for s in shapes]
def test_serialize_deserialize_random():
    """Round-trip randomly shaped batches through shared memory.

    FIX: ``np.float`` was removed in NumPy 1.24; use ``np.float64`` (the
    dtype the old alias resolved to).
    """
    for max_shape in [(12, 200, 100, 3), (200, 300, 3), (300, 2)]:
        for dtype in [np.uint8, np.float64]:
            rsdi = RandomlyShapedDataIterator(10, max_shape=max_shape, dtype=dtype)
            for i, batch in enumerate(rsdi):
                if i == 10:
                    break
                yield check_serialize_deserialize, batch
def worker(start_method, sock, task_queue, res_queue, worker_cb, worker_params):
    """Child-process entry point: attach to the queues (receiving their shm
    handles over ``sock`` under the "spawn" start method) and run
    ``worker_cb`` repeatedly until it returns None."""
    if start_method == "spawn":
        task_queue.open_shm(multiprocessing.reduction.recv_handle(sock))
        res_queue.open_shm(multiprocessing.reduction.recv_handle(sock))
        sock.close()
    while worker_cb(task_queue, res_queue, **worker_params) is not None:
        pass
@contextmanager
def setup_queue_and_worker(start_method, capacity, worker_cb, worker_params):
    """Start a worker process connected via two ShmQueues of given capacity.

    Yields (task_queue, res_queue). With the "spawn" start method the shared
    memory handles cannot be inherited, so they are sent to the child over a
    socketpair after it starts.
    """
    mp = multiprocessing.get_context(start_method)
    task_queue = ShmQueue(mp, capacity)
    res_queue = ShmQueue(mp, capacity)
    if start_method == "spawn":
        socket_r, socket_w = socket.socketpair()
    else:
        # "fork" inherits the shm handles directly; no socket needed
        socket_r = None
    proc = mp.Process(target=worker, args=(start_method, socket_r, task_queue,
                                           res_queue, worker_cb, worker_params))
    proc.start()
    try:
        if start_method == "spawn":
            # NOTE(review): send_handle's destination pid should presumably be
            # the child's pid (proc.pid); os.getppid() is this process's
            # *parent*. On Linux the pid argument is not used by the
            # SCM_RIGHTS transfer, so this works there -- confirm intent.
            pid = os.getppid()
            multiprocessing.reduction.send_handle(socket_w, task_queue.shm.handle, pid)
            multiprocessing.reduction.send_handle(socket_w, res_queue.shm.handle, pid)
        yield task_queue, res_queue
    finally:
        # close the queues first (unblocks a worker stuck in get()) unless
        # the worker has already exited
        if not proc.exitcode:
            res_queue.close()
            task_queue.close()
        proc.join()
        assert proc.exitcode == 0
def _put_msgs(queue, msgs, one_by_one):
    """Insert ``msgs`` into ``queue`` either as a single batch or as a series
    of one-element batches, depending on ``one_by_one``."""
    if one_by_one:
        for msg in msgs:
            queue.put([msg])
    else:
        queue.put(msgs)
def test_queue_full_assertion():
    """Putting capacity + 1 messages must raise "The queue is full"."""
    for start_method in ("spawn", "fork"):
        for capacity in (1, 4):
            for one_by_one in (True, False):
                ctx = multiprocessing.get_context(start_method)
                queue = ShmQueue(ctx, capacity)
                overflow = [ShmMessageDesc(i, i, i, i, i) for i in range(capacity + 1)]
                yield raises(RuntimeError, "The queue is full")(_put_msgs), \
                    queue, overflow, one_by_one
def copy_callback(task_queue, res_queue, num_samples):
    """Forward one batch of messages from task_queue to res_queue.

    Returns the forwarded messages, or None when the queue was closed
    (which signals the worker loop to stop).
    """
    msgs = task_queue.get(num_samples=num_samples)
    if msgs is None:
        return None
    assert len(msgs) > 0
    res_queue.put(msgs)
    return msgs
def _test_queue_recv(start_method, worker_params, capacity, send_msgs, recv_msgs, send_one_by_one):
    """Send batches of messages through the worker and verify they come back
    unchanged and in order.

    ``send_msgs``/``recv_msgs`` are parallel sequences: in each round, send
    ``send_msgs[k]`` messages and then receive ``recv_msgs[k]`` of them.
    """
    count = 0

    def next_i():
        # monotonically increasing counter used to give every message field
        # a distinct value
        nonlocal count
        count += 1
        return count

    with setup_queue_and_worker(start_method, capacity, copy_callback, worker_params) \
            as (task_queue, res_queue):
        all_msgs = []
        received = 0
        for send_msg, recv_msg in zip(send_msgs, recv_msgs):
            msgs = [ShmMessageDesc(next_i(), -next_i(), next_i(), next_i(), next_i())
                    for i in range(send_msg)]
            all_msgs.extend(msgs)
            _put_msgs(task_queue, msgs, send_one_by_one)
            for _ in range(recv_msg):
                # NOTE(review): this rebinds the loop variable `recv_msg` to the
                # received message; harmless here (the range is already
                # computed) but confusing -- consider renaming.
                [recv_msg] = res_queue.get()
                msg_values = all_msgs[received].get_values()
                received += 1
                recv_msg_values = recv_msg.get_values()
                assert len(msg_values) == len(recv_msg_values)
                assert all(msg == recv_msg for msg, recv_msg in zip(msg_values, recv_msg_values))
def test_queue_recv():
    """Drive _test_queue_recv over several capacities and send/recv patterns."""
    cases = [
        (1, (1, 1, 1), (1, 1, 1)),
        (13, (7, 6, 5), (5, 1, 12)),
        (20, (19, 5, 4, 9), (19, 1, 5, 12)),
        (100, (100, 100, 5), (100, 95, 10)),
    ]
    for start_method in ("spawn", "fork"):
        for capacity, send_msg, recv_msg in cases:
            for send_one_by_one in (True, False):
                for worker_params in ({'num_samples': 1}, {'num_samples': None}):
                    yield (_test_queue_recv, start_method, worker_params, capacity,
                           send_msg, recv_msg, send_one_by_one)
def _test_queue_large(start_method, msg_values):
    """Send messages with the given field values through the worker and
    verify the echoed messages carry identical values."""
    params = {'num_samples': None}
    with setup_queue_and_worker(start_method, len(msg_values), copy_callback, params) \
            as (task_queue, res_queue):
        _put_msgs(task_queue, [ShmMessageDesc(*values) for values in msg_values], False)
        for values in msg_values:
            [echoed] = res_queue.get()
            echoed_values = echoed.get_values()
            assert len(values) == len(echoed_values)
            assert all(sent == got for sent, got in zip(values, echoed_values))
def test_queue_large():
    """Field values up to the struct field limits must round-trip intact."""
    i32 = 2**31 - 1
    u32 = 2**32 - 1
    u64 = 2**64 - 1
    cases = [(i32, i32, i32, i32, i32),
             (i32, i32, u32, u32, u32),
             (i32, i32, u64, u64, u64)]
    for start_method in ("spawn", "fork"):
        for msg in cases:
            yield _test_queue_large, start_method, [msg]
def test_queue_large_failure():
    """Values outside the C-struct field ranges must fail serialization."""
    i32_max = 2**31 - 1
    u32_max = 2**32 - 1
    error_message = "Failed to serialize object as C-like structure. " \
        "Tried to populate following fields:"
    too_big = (i32_max + 1, 0, u32_max, u32_max, u32_max)
    negative_unsigned = (i32_max, i32_max, -1, 0, 0)
    for start_method in ("spawn", "fork"):
        for bad in (too_big, negative_unsigned):
            yield raises(RuntimeError, error_message)(_test_queue_large), start_method, [bad]
|
DALI-main
|
dali/test/python/test_external_source_parallel_shared_batch.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.tensors import TensorCPU, TensorGPU, TensorListCPU, TensorListGPU
import nvidia.dali.tensors as tensors
import numpy as np
import torch
from torch.utils.dlpack import to_dlpack
import ctypes
from nvidia.dali.backend import CheckDLPackCapsule
def convert_to_torch(tensor, device="cuda", dtype=None, size=None):
    """Copy a DALI tensor / tensor list into a freshly allocated torch tensor.

    When ``size`` is not given it is taken from the input; tensor lists are
    first densified via ``as_tensor``.
    """
    if size is None:
        if isinstance(tensor, (TensorListCPU, TensorListGPU)):
            dense = tensor.as_tensor()
        else:
            dense = tensor
        size = dense.shape()
    result = torch.empty(size=size, device=device, dtype=dtype)
    # DALI fills the torch-owned buffer through a raw pointer
    tensor.copy_to_external(ctypes.c_void_p(result.data_ptr()))
    return result
def test_dlpack_tensor_gpu_direct_creation():
    """A TensorGPU built from a DLPack capsule must preserve the data."""
    src = torch.rand(size=[3, 5, 6], device="cuda")
    round_tripped = convert_to_torch(TensorGPU(to_dlpack(src)),
                                     device=src.device, dtype=src.dtype)
    assert torch.all(src.eq(round_tripped))
def test_dlpack_tensor_gpu_to_cpu():
    """GPU DLPack round-trip, with the comparison done on the host."""
    src = torch.rand(size=[3, 5, 6], device="cuda")
    round_tripped = convert_to_torch(TensorGPU(to_dlpack(src)),
                                     device=src.device, dtype=src.dtype)
    assert torch.all(src.cpu().eq(round_tripped.cpu()))
def test_dlpack_tensor_list_gpu_direct_creation():
    """A TensorListGPU built from a DLPack capsule must preserve the data."""
    src = torch.rand(size=[3, 5, 6], device="cuda")
    tl = TensorListGPU(to_dlpack(src), "NHWC")
    assert torch.all(src.eq(convert_to_torch(tl, device=src.device, dtype=src.dtype)))
def test_dlpack_tensor_list_gpu_to_cpu():
    """TensorListGPU DLPack round-trip, compared on the host."""
    src = torch.rand(size=[3, 5, 6], device="cuda")
    tl = TensorListGPU(to_dlpack(src), "NHWC")
    back = convert_to_torch(tl, device=src.device, dtype=src.dtype)
    assert torch.all(src.cpu().eq(back.cpu()))
def check_dlpack_types_gpu(t):
    """Round-trip a small 2x2 tensor of dtype ``t`` through TensorGPU."""
    src = torch.tensor([[0.39, 1.5], [1.5, 0.33]], device="cuda", dtype=t)
    tensor = TensorGPU(to_dlpack(src), "NHWC")
    back = convert_to_torch(tensor, device=src.device, dtype=src.dtype, size=tensor.shape())
    assert torch.all(src.eq(back))
def test_dlpack_interface_types():
    """Exercise the GPU DLPack round-trip for every supported dtype."""
    # the more recent PyTorch doesn't support torch.bool
    supported = (torch.int8, torch.int16, torch.int32, torch.int64, torch.uint8,
                 torch.float64, torch.float32, torch.float16)
    for t in supported:
        yield check_dlpack_types_gpu, t
def test_dlpack_tensor_cpu_direct_creation():
    """A TensorCPU built from a DLPack capsule must preserve the data."""
    src = torch.rand(size=[3, 5, 6], device="cpu")
    round_tripped = convert_to_torch(TensorCPU(to_dlpack(src)),
                                     device=src.device, dtype=src.dtype)
    assert torch.all(src.eq(round_tripped))
def test_dlpack_tensor_list_cpu_direct_creation():
    """A TensorListCPU built from a DLPack capsule must preserve the data."""
    src = torch.rand(size=[3, 5, 6], device="cpu")
    tl = TensorListCPU(to_dlpack(src), "NHWC")
    assert torch.all(src.eq(convert_to_torch(tl, device=src.device, dtype=src.dtype)))
def test_dlpack_tensor_list_cpu_direct_creation_list():
    """TensorListCPU may also be built from a list of DLPack capsules."""
    src = torch.rand(size=[3, 5, 6], device="cpu")
    tl = TensorListCPU([to_dlpack(src)], "NHWC")
    assert torch.all(src.eq(convert_to_torch(tl, device=src.device, dtype=src.dtype)))
# Check if dlpack tensors behave correctly when created from temporary objects
def test_tensor_cpu_from_dlpack():
    """TensorCPU must keep its temporary DLPack source alive."""
    def make(idx):
        arr = np.full((4, 4), idx)
        return tensors.TensorCPU(to_dlpack(torch.from_numpy(arr)), "")

    created = [make(i) for i in range(4)]
    for i, t in enumerate(created):
        np.testing.assert_array_equal(np.array(t), np.full((4, 4), i))
def test_tensor_list_cpu_from_dlpack():
    """TensorListCPU must keep its temporary DLPack source alive."""
    def make(idx):
        arr = np.full((4, 4), idx)
        return tensors.TensorListCPU(to_dlpack(torch.from_numpy(arr)), "")

    created = [make(i) for i in range(4)]
    for i, tl in enumerate(created):
        np.testing.assert_array_equal(tl.as_array(), np.full((4, 4), i))
def test_tensor_gpu_from_dlpack():
    """TensorGPU must keep its temporary DLPack source alive."""
    def make(idx):
        arr = np.full((4, 4), idx)
        return tensors.TensorGPU(to_dlpack(torch.from_numpy(arr).cuda()), "")

    created = [make(i) for i in range(4)]
    for i, t in enumerate(created):
        np.testing.assert_array_equal(np.array(t.as_cpu()), np.full((4, 4), i))
def test_tensor_list_gpu_from_dlpack():
    """TensorListGPU must keep its temporary DLPack source alive."""
    def make(idx):
        arr = np.full((4, 4), idx)
        return tensors.TensorListGPU(to_dlpack(torch.from_numpy(arr).cuda()), "")

    created = [make(i) for i in range(4)]
    for i, tl in enumerate(created):
        np.testing.assert_array_equal(tl.as_cpu().as_array(), np.full((4, 4), i))
def check_dlpack_types_cpu(t):
    """Round-trip a small 2x2 tensor of dtype ``t`` through TensorCPU."""
    src = torch.tensor([[0.39, 1.5], [1.5, 0.33]], device="cpu", dtype=t)
    tensor = TensorCPU(to_dlpack(src), "NHWC")
    back = convert_to_torch(tensor, device=src.device, dtype=src.dtype, size=tensor.shape())
    assert torch.all(src.eq(back))
def test_dlpack_interface_types_cpu():
    """Exercise the CPU DLPack round-trip for every supported dtype."""
    # the more recent PyTorch doesn't support torch.bool
    supported = (torch.int8, torch.int16, torch.int32, torch.int64, torch.uint8,
                 torch.float64, torch.float32)
    for t in supported:
        yield check_dlpack_types_cpu, t
def test_CheckDLPackCapsuleNone():
    """None is not a DLPack capsule: (is_capsule, is_gpu) == (False, False)."""
    assert CheckDLPackCapsule(None) == (False, False)
def test_CheckDLPackCapsuleCpu():
    """A CPU-tensor capsule is recognized as a capsule, not on GPU."""
    capsule = to_dlpack(torch.rand(size=[3, 5, 6], device="cpu"))
    assert CheckDLPackCapsule(capsule) == (True, False)
def test_CheckDLPackCapsuleGpu():
    """A CUDA-tensor capsule is recognized as a capsule residing on GPU."""
    capsule = to_dlpack(torch.rand(size=[3, 5, 6], device="cuda"))
    assert CheckDLPackCapsule(capsule) == (True, True)
|
DALI-main
|
dali/test/python/test_backend_impl_torch_dlpack.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def PSNR(data, orig, maxval=255):
    """Peak signal-to-noise ratio between ``data`` and ``orig``, in dB.

    Args:
        data: distorted signal (array-like).
        orig: reference signal, broadcast-compatible with ``data``.
        maxval: peak value of the signal range (default 255, i.e. 8-bit
            images -- keeps the original hard-coded behavior).

    Returns:
        PSNR in dB; ``inf`` for identical inputs (MSE of 0), avoiding a
        divide-by-zero warning from the original implementation.
    """
    mse = (np.square(np.subtract(data, orig))).mean()
    if mse == 0:
        return float('inf')
    return 10 * np.log10(maxval * maxval / mse)
|
DALI-main
|
dali/test/python/test_noise_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/test/python/autograph/impl/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conversion module."""
import imp
import sys
import unittest
import six
from nvidia.dali._autograph import utils
from nvidia.dali._autograph.core import config
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.impl import api
from nvidia.dali._autograph.impl import conversion
class ConversionTest(unittest.TestCase):
    """Tests for the autograph conversion allowlisting logic."""

    def _simple_program_ctx(self):
        # minimal recursive conversion context shared by the tests
        return converter.ProgramContext(
            options=converter.ConversionOptions(recursive=True),
            autograph_module=api)

    def test_is_allowlisted(self):
        def test_fn():
            return 1

        self.assertFalse(conversion.is_allowlisted(test_fn))

    def test_is_allowlisted_callable_allowlisted_call(self):
        # FIX: the ``imp`` module (imp.new_module) was deprecated since
        # Python 3.4 and removed in 3.12; ``types.ModuleType`` is the
        # documented replacement and produces an equivalent empty module.
        import types as types_module
        allowlisted_mod = types_module.ModuleType('test_allowlisted_call')
        sys.modules['test_allowlisted_call'] = allowlisted_mod
        config.CONVERSION_RULES = ((config.DoNotConvert('test_allowlisted_call'),) +
                                   config.CONVERSION_RULES)

        class TestClass(object):

            def __call__(self):
                pass

            def allowlisted_method(self):
                pass

        TestClass.__module__ = 'test_allowlisted_call'
        if six.PY2:
            TestClass.__call__.__func__.__module__ = 'test_allowlisted_call'
        else:
            TestClass.__call__.__module__ = 'test_allowlisted_call'

        class Subclass(TestClass):

            def converted_method(self):
                pass

        tc = Subclass()

        # anything declared in (or subclassing into) the allowlisted module
        # must be reported as allowlisted; methods added by subclasses from
        # non-allowlisted modules must not
        self.assertTrue(conversion.is_allowlisted(TestClass.__call__))
        self.assertTrue(conversion.is_allowlisted(tc))
        self.assertTrue(conversion.is_allowlisted(tc.__call__))
        self.assertTrue(conversion.is_allowlisted(tc.allowlisted_method))
        self.assertFalse(conversion.is_allowlisted(Subclass))
        self.assertFalse(conversion.is_allowlisted(tc.converted_method))
|
DALI-main
|
dali/test/python/autograph/impl/test_conversion.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for api module."""
import abc
import collections
import contextlib
import functools
import gc
import imp
import inspect
import os
import re
import sys
import textwrap
import types
import unittest
import unittest.mock
import numpy as np
import six
from operator import add
from functools import reduce
from nvidia.dali._autograph.core import ag_ctx
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.core import converter_testing
from nvidia.dali._autograph.impl import api
from nvidia.dali._autograph.impl import conversion
from nvidia.dali._autograph.pyct import errors
from nvidia.dali._autograph.pyct import inspect_utils
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.utils import ag_logging
# from nvidia.dali._autograph.utils.all_utils import custom_constant
# Module-level counter mutated by ApiTest.test_to_graph_with_globals below.
global_n = 2

# Conversion options shared by most converted_call tests in this file.
DEFAULT_RECURSIVE = converter.ConversionOptions(recursive=True)
class TestResource(object):
    """Marker object tracked by ApiTest.assertNoMemoryLeaks.

    The leak tests create one of these inside a closure and then assert
    that no instance survives garbage collection.
    """

    def __init__(self):
        # Arbitrary payload read by the closure-leak tests (res.x).
        self.x = 3
def custom_constant(val):
    """Stand-in for a framework constant op.

    Wraps ``val`` in a fresh NumPy array so the tests can exercise
    array-valued inputs without a real tensor framework.
    """
    wrapped = np.array(val)
    return wrapped
class ApiTest(unittest.TestCase):
    """Tests for the autograph ``api`` module.

    Covers converted_call, the convert/do_not_convert decorators,
    context tracking, and to_graph/to_code.
    """

    @classmethod
    def setUpClass(cls):
        # Initialize autograph once for the class, saving the module-level
        # state it overwrites so tearDownClass can restore it.
        cls._transpiler_bkp = api._TRANSPILER
        cls._conversion_rules_bkp = api.config.CONVERSION_RULES
        api._TRANSPILER = None
        api.initialize_autograph()

    @classmethod
    def tearDownClass(cls):
        # Restore the globals saved in setUpClass.
        api._TRANSPILER = cls._transpiler_bkp
        api.config.CONVERSION_RULES = cls._conversion_rules_bkp

    def evaluate(self, x):
        # Identity: this port runs eagerly, so there is nothing to evaluate.
        return x

    def assertAllEqual(self, a, b):
        # Element-wise comparison when either side is a numpy array;
        # plain assertEqual otherwise.
        if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
            same = np.array(a) == np.array(b)
            self.assertTrue(np.all(same))
        else:
            self.assertEqual(a, b)

    @contextlib.contextmanager
    def assertPrints(self, expected, not_expected):
        """Asserts the wrapped block prints `expected` but not `not_expected`."""
        try:
            out_capturer = six.StringIO()
            sys.stdout = out_capturer
            yield
            self.assertIn(expected, out_capturer.getvalue())
            self.assertNotIn(not_expected, out_capturer.getvalue())
        finally:
            sys.stdout = sys.__stdout__

    def assertNoMemoryLeaks(self, f):
        """Asserts that calling f() leaves no live TestResource objects behind."""
        object_ids_before = {id(o) for o in gc.get_objects()}
        f()
        gc.collect()
        objects_after = tuple(
            o for o in gc.get_objects() if id(o) not in object_ids_before)
        self.assertEqual(
            tuple(o for o in objects_after if isinstance(o, TestResource)), ())

    def test_converted_call_kwonly_args(self):

        def test_fn(*, a):
            return a

        x = api.converted_call(
            test_fn, (), {'a': custom_constant(-1)}, options=DEFAULT_RECURSIVE)
        self.assertEqual(-1, self.evaluate(x))

    def test_super_with_no_arg(self):
        test_case_self = self

        class TestBase:

            def plus_three(self, x):
                return x + 3

        class TestSubclass(TestBase):

            def plus_three(self, x):
                test_case_self.fail('This should never be called.')

            def no_arg(self, x):
                return super().plus_three(x)

        tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE)
        self.assertEqual(5, tc.no_arg(2))

    def test_converted_call_avoids_triggering_operators(self):
        test_self = self

        class Pair(collections.namedtuple('Pair', ['a', 'b'])):

            def __call__(self):
                return self.a + self.b

            def __eq__(self, other):
                test_self.fail('Triggered operator')

        p = Pair(custom_constant(1), custom_constant(2))

        x = api.converted_call(p, (), {}, options=DEFAULT_RECURSIVE)
        self.assertIsNotNone(self.evaluate(x), 3)

    def test_decorator_recursive(self):

        class TestClass(object):

            def called_member(self, a):
                if a < 0:
                    a = -a
                return a

            @api.convert(recursive=True)
            def test_method(self, x, s, a):
                while reduce(add, x) > s:
                    x //= self.called_member(a)
                return x

        tc = TestClass()
        x = tc.test_method(
            custom_constant([2, 4]), custom_constant(1),
            custom_constant(-2))
        self.assertListEqual([0, 1], self.evaluate(x).tolist())

    def test_decorator_not_recursive(self):

        class TestClass(object):

            def called_member(self, a):
                return -a

            @api.convert(recursive=False)
            def test_method(self, x, s, a):
                while reduce(add, x) > s:
                    x //= self.called_member(a)
                return x

        tc = TestClass()
        x = tc.test_method(
            custom_constant([2, 4]), custom_constant(1),
            custom_constant(-2))
        self.assertListEqual([0, 1], self.evaluate(x).tolist())

    def test_convert_then_do_not_convert(self):

        class TestClass(object):

            @api.do_not_convert
            def called_member(self, a):
                return -a

            @api.convert(recursive=True)
            def test_method(self, x, s, a):
                while reduce(add, x) > s:
                    x //= self.called_member(a)
                return x

        tc = TestClass()
        x = tc.test_method(
            custom_constant((2, 4)), custom_constant(1),
            custom_constant(-2))
        self.assertAllEqual((0, 1), self.evaluate(x))

    def test_decorator_calls_decorated(self):

        class TestClass(object):

            @api.convert()
            def called_member(self, a):
                if a < 0:
                    a = -a
                return a

            @api.convert(recursive=True)
            def test_method(self, x, s, a):
                while reduce(add, x) > s:
                    x //= self.called_member(a)
                return x

        tc = TestClass()
        x = tc.test_method(
            custom_constant([2, 4]), custom_constant(1),
            custom_constant(-2))
        self.assertListEqual([0, 1], self.evaluate(x).tolist())

    # TODO(klecki): Argspec mismatches
    def _test_decorator_preserves_argspec(self):

        class TestClass(object):

            def test_method(self, a):
                if a < 0:
                    a = -a
                return a

            test_method_converted = api.convert()(test_method)

        tc = TestClass()
        self.assertListEqual(
            list(inspect.getfullargspec(tc.test_method)),
            list(inspect.getfullargspec(tc.test_method_converted)))

    def test_do_not_convert_argspec(self):

        class TestClass(object):

            def test_method(self, x, y):
                z = x + y
                return z

            test_method_allowlisted = api.do_not_convert(test_method)

        tc = TestClass()
        self.assertTrue(inspect.ismethod(tc.test_method_allowlisted))
        # Because the wrapped function is not generated, we can't preserve its
        # arg spec.

    def test_do_not_convert_callable_object(self):

        class TestClass(object):

            def __call__(self):
                return 1

        tc = TestClass()
        self.assertEqual(1, api.do_not_convert(tc)())

    def test_convert_call_site_decorator(self):

        class TestClass(object):

            def called_member(self, a):
                if a < 0:
                    a = -a
                return a

            @api.convert(recursive=True)
            def test_method(self, x, s, a):
                while reduce(add, x) > s:
                    x //= api.converted_call(
                        self.called_member, (a,), None, options=DEFAULT_RECURSIVE)
                return x

        tc = TestClass()
        x = tc.test_method(custom_constant([2, 4]), custom_constant(1),
                           custom_constant(-2))
        self.assertListEqual([0, 1], self.evaluate(x).tolist())

    def test_converted_call_builtin(self):
        x = api.converted_call(range, (3,), None, options=DEFAULT_RECURSIVE)
        self.assertEqual((0, 1, 2), tuple(x))

        x = api.converted_call(
            re.compile, ('mnas_v4_a.*\\/.*(weights|kernel):0$',),
            None,
            options=DEFAULT_RECURSIVE)
        self.assertIsNotNone(x.match('mnas_v4_a/weights:0'))

    def test_converted_call_function(self):

        def test_fn(x):
            if x < 0:
                return -x
            return x

        x = api.converted_call(
            test_fn, (custom_constant(-1),), None, options=DEFAULT_RECURSIVE)
        self.assertEqual(1, self.evaluate(x))

    def test_converted_call_functools_partial(self):

        def test_fn(x, y, z):
            if x < 0:
                return -x, -y, -z
            return x, y, z

        x = api.converted_call(
            functools.partial(test_fn, custom_constant(-1), z=-3),
            (custom_constant(-2),),
            None,
            options=DEFAULT_RECURSIVE)
        self.assertEqual((1, 2, 3), self.evaluate(x))

        x = api.converted_call(
            functools.partial(
                functools.partial(test_fn, custom_constant(-1)), z=-3),
            (custom_constant(-2),),
            None,
            options=DEFAULT_RECURSIVE)
        self.assertEqual((1, 2, 3), self.evaluate(x))

    def test_converted_call_functools_partial_kwarg_mutation(self):

        def test_fn(x, y, z):
            if x < 0:
                return -x, -y, -z
            return x, y, z

        partial_fn = functools.partial(test_fn, custom_constant(-1), z=-3)
        # Call using kwargs to assign y first to ensure that partial_fn.keywords is
        # not mutated for subsequent calls (where y is assign through args).
        x = api.converted_call(
            partial_fn,
            args=(),
            kwargs={
                'y': custom_constant(-2),
            },
            options=DEFAULT_RECURSIVE)
        self.assertEqual((1, 2, 3), self.evaluate(x))

        x = api.converted_call(
            partial_fn,
            args=(custom_constant(-4),),
            kwargs=None,
            options=DEFAULT_RECURSIVE)
        self.assertEqual((1, 4, 3), self.evaluate(x))

    def test_converted_call_method(self):

        class TestClass(object):

            def __init__(self, x):
                self.x = x

            def test_method(self):
                if self.x < 0:
                    return -self.x
                return self.x

        tc = TestClass(custom_constant(-1))
        x = api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)
        self.assertEqual(1, self.evaluate(x))

    def test_converted_call_synthetic_method(self):

        class TestClass(object):

            def __init__(self, x):
                self.x = x

        def test_function(self):
            if self.x < 0:
                return -self.x
            return self.x

        tc = TestClass(custom_constant(-1))
        # Bind the free function to the instance at runtime.
        test_method = types.MethodType(test_function, tc)

        x = api.converted_call(test_method, (), None, options=DEFAULT_RECURSIVE)
        self.assertEqual(1, self.evaluate(x))

    def test_converted_call_method_wrapper(self):

        class TestClass(object):

            def foo(self):
                pass

        tc = TestClass()

        # `method.__get__()` returns a so-called method-wrapper.
        wrapper = api.converted_call(
            tc.foo.__get__, (tc,), None, options=DEFAULT_RECURSIVE)
        self.assertEqual(wrapper, tc.foo)

    def test_converted_call_method_as_object_attribute(self):

        class AnotherClass(object):

            def __init__(self):
                self.another_class_attr = custom_constant(1)

            def method(self):
                if self.another_class_attr > 0:
                    return self.another_class_attr + 1
                return self.another_class_attr + 10

        class TestClass(object):

            def __init__(self, another_obj_method):
                self.another_obj_method = another_obj_method

        obj = AnotherClass()
        tc = TestClass(obj.method)

        x = api.converted_call(
            tc.another_obj_method, (), None, options=DEFAULT_RECURSIVE)
        self.assertEqual(self.evaluate(x), 2)

    def test_converted_call_method_converts_recursively(self):

        class TestClass(object):

            def __init__(self, x):
                self.x = x

            def other_method(self):
                if self.x < 0:
                    return -self.x
                return self.x

            def test_method(self):
                return self.other_method()

        tc = TestClass(custom_constant(-1))
        x = api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)
        self.assertEqual(1, self.evaluate(x))

    def test_converted_call_method_by_class(self):

        class TestClass(object):

            def __init__(self, x):
                self.x = x

            def test_method(self):
                if self.x < 0:
                    return -self.x
                return self.x

        tc = TestClass(custom_constant(-1))
        x = api.converted_call(
            TestClass.test_method, (tc,), None, options=DEFAULT_RECURSIVE)
        self.assertEqual(1, self.evaluate(x))

    def test_converted_call_callable_object(self):

        class TestClass(object):

            def __init__(self, x):
                self.x = x

            def __call__(self):
                if self.x < 0:
                    return -self.x
                return self.x

        tc = TestClass(custom_constant(-1))
        x = api.converted_call(tc, (), None, options=DEFAULT_RECURSIVE)
        self.assertEqual(1, self.evaluate(x))

    def test_converted_call_callable_metaclass(self):
        test_self = self

        class TestMetaclass(type):

            def __call__(cls):  # pylint: disable=method-hidden
                self.assertTrue(converter_testing.is_inside_generated_code())
                inst = object.__new__(cls)
                inst.__init__()

                def instance_call(unused_self):
                    test_self.fail(
                        'The class-bound __call__ should be called, not the instance'
                        ' bound one.')

                inst.__call__ = instance_call
                return inst

        tmc = TestMetaclass('TestClass', (), {})
        tc = api.converted_call(tmc, (), None, options=DEFAULT_RECURSIVE)
        self.assertIsInstance(tc, tmc)

    def test_converted_call_callable_abc(self):
        test_self = self

        @six.add_metaclass(abc.ABCMeta)
        class TestBase(object):

            @abc.abstractmethod
            def __call__(self):
                test_self.fail('This should not be called')

        class TestSubclass(TestBase):

            def __init__(self):
                test_self.assertFalse(converter_testing.is_inside_generated_code())

            def __call__(self, expected):
                test_self.assertTrue(expected)
                test_self.assertTrue(converter_testing.is_inside_generated_code())

        tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE)
        api.converted_call(tc, (True,), None, options=DEFAULT_RECURSIVE)

    def test_converted_call_constructor(self):
        test_self = self

        class TestClass(object):

            def __init__(self):
                test_self.assertFalse(converter_testing.is_inside_generated_code())

        tc = api.converted_call(TestClass, (), None, options=DEFAULT_RECURSIVE)
        self.assertIsInstance(tc, TestClass)

    def test_converted_call_mangled_properties(self):

        class TestClass(object):

            def __init__(self):
                self.__private = custom_constant(-1)

            def test_method(self):
                return self.__private

        tc = TestClass()
        with self.assertRaisesRegex(errors.UnsupportedLanguageElementError,
                                    'mangled names'):
            api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)

        # TODO(mdan): Refactor to avoid this use of global state.
        ag_logging.set_verbosity(0, True)
        os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '0'
        with self.assertPrints('could not transform', 'bug'):
            api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)
        ag_logging.set_verbosity(0, False)
        os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1'

    def test_converted_call_partial_of_allowlisted_function(self):

        def test_fn(_):
            self.assertFalse(converter_testing.is_inside_generated_code())

        converter_testing.allowlist(test_fn)
        api.converted_call(
            functools.partial(test_fn, None), (), None, options=DEFAULT_RECURSIVE)

    def test_converted_call_already_converted(self):

        def f(x):
            return x == 0

        x = api.converted_call(
            f, (custom_constant(0),), None, options=DEFAULT_RECURSIVE)
        self.assertTrue(self.evaluate(x))

        converted_f = api.to_graph(
            f, experimental_optional_features=converter.Feature.ALL)
        x = api.converted_call(
            converted_f, (custom_constant(0),),
            None,
            options=DEFAULT_RECURSIVE)
        self.assertTrue(self.evaluate(x))

    def test_converted_call_then_already_converted_dynamic(self):

        @api.convert()
        def g(x):
            if x > 0:
                return x
            else:
                return -x

        def f(g, x):
            return g(x)

        x = api.converted_call(
            f, (g, custom_constant(1)), None, options=DEFAULT_RECURSIVE)
        self.assertEqual(self.evaluate(x), 1)

    def test_converted_call_forced_when_explicitly_allowlisted(self):

        @api.do_not_convert()
        def f(x):
            return x + 1

        opts = converter.ConversionOptions(recursive=True, user_requested=True)
        x = api.converted_call(f, (custom_constant(0),), None, options=opts)
        self.assertTrue(self.evaluate(x))

        converted_f = api.to_graph(
            f, experimental_optional_features=converter.Feature.ALL)
        x = api.converted_call(converted_f, (0,), None, options=DEFAULT_RECURSIVE)
        self.assertEqual(x, 1)

    def test_converted_call_no_kwargs_allowed(self):

        def f(*args):
            # Note: np.broadcast rejects any **kwargs, even *{}
            return np.broadcast(args[:1])

        opts = converter.ConversionOptions(internal_convert_user_code=False)
        self.assertIsNotNone(
            api.converted_call(f, (1, 2, 3, 4), None, options=opts))

    def test_converted_call_allowlisted_method(self):

        class TestClass(object):

            def method(self):
                return converter_testing.is_inside_generated_code()

        obj = TestClass()
        converter_testing.allowlist(obj.method.__func__)

        self.assertFalse(
            api.converted_call(obj.method, (), {}, options=DEFAULT_RECURSIVE))

    def test_converted_call_allowlisted_method_via_owner(self):

        class TestClass(object):

            def method(self):
                return converter_testing.is_inside_generated_code()

        converter_testing.allowlist(TestClass)

        obj = TestClass()
        self.assertFalse(
            api.converted_call(obj.method, (), {}, options=DEFAULT_RECURSIVE))

    def test_converted_call_numpy(self):
        x = api.converted_call(np.arange, (5,), None, options=DEFAULT_RECURSIVE)
        self.assertAllEqual(x, list(range(5)))

    def test_converted_call_tf_op_forced(self):
        # TODO(mdan): Add the missing level of support to LOGICAL_EXPRESSIONS.
        opts = converter.ConversionOptions(
            user_requested=True, optional_features=None)

        x = api.converted_call(add, (1, 1), None, options=opts)
        self.assertAllEqual(self.evaluate(x), 2)

    def test_converted_call_exec_generated_code(self):
        temp_mod = imp.new_module('test_module')
        dynamic_code = """
            def foo(x):
                return x + 1
        """
        exec(textwrap.dedent(dynamic_code), temp_mod.__dict__)  # pylint:disable=exec-used
        opts = converter.ConversionOptions(optional_features=None)

        x = api.converted_call(temp_mod.foo, (1,), None, options=opts)
        self.assertAllEqual(x, 2)

    def test_converted_call_namedtuple(self):
        x = api.converted_call(
            collections.namedtuple, ('TestNamedtuple', ('a', 'b')),
            None,
            options=DEFAULT_RECURSIVE)
        self.assertTrue(inspect_utils.isnamedtuple(x))

    def test_converted_call_namedtuple_via_collections(self):
        x = api.converted_call(
            collections.namedtuple, ('TestNamedtuple', ('a', 'b')),
            None,
            options=DEFAULT_RECURSIVE)
        self.assertTrue(inspect_utils.isnamedtuple(x))

    def test_converted_call_namedtuple_subclass_bound_method(self):

        class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):

            def test_method(self, x):
                while reduce(add, x) > self.a:
                    x //= self.b
                return x

        obj = TestClass(5, 2)
        x = api.converted_call(
            obj.test_method, (custom_constant([2, 4]),),
            None,
            options=DEFAULT_RECURSIVE)
        self.assertAllEqual(self.evaluate(x), [1, 2])

    def test_converted_call_namedtuple_method(self):

        class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):
            pass

        obj = TestClass(5, 2)
        # _asdict is a documented method of namedtuple.
        x = api.converted_call(obj._asdict, (), None, options=DEFAULT_RECURSIVE)
        self.assertDictEqual(x, {'a': 5, 'b': 2})

    def test_converted_call_namedtuple_subclass_unbound_method(self):

        class TestClass(collections.namedtuple('TestNamedtuple', ('a', 'b'))):

            def test_method(self, x):
                while reduce(add, x) > self.a:
                    x //= self.b
                return x

        obj = TestClass(5, 2)
        x = api.converted_call(
            TestClass.test_method, (obj, custom_constant([2, 4])),
            None,
            options=DEFAULT_RECURSIVE)
        self.assertAllEqual(self.evaluate(x), [1, 2])

    def test_converted_call_lambda(self):
        l = lambda x: x == 0

        x = api.converted_call(
            l, (custom_constant(0),), None, options=DEFAULT_RECURSIVE)
        self.assertAllEqual(True, self.evaluate(x))

    def test_converted_call_native_binding(self):
        x = api.converted_call(np.power, (2, 2), None, options=DEFAULT_RECURSIVE)
        self.assertAllEqual(x, 4)

    def test_converted_call_native_binding_errorneous(self):

        class FaultyBinding(object):

            def __array__(self):
                raise ValueError('fault')

        bad_obj = FaultyBinding()

        def fail_if_warning(*_):
            self.fail('No warning should be issued')

        with unittest.mock.patch.object(ag_logging, 'warning', fail_if_warning):
            with self.assertRaisesRegex(ValueError, 'fault'):
                api.converted_call(
                    np.power, (bad_obj, 2), None, options=DEFAULT_RECURSIVE)

    def test_converted_call_no_leaks_via_closure(self):

        def test_fn():
            res = TestResource()

            def f(y):
                return res.x + y

            api.converted_call(f, (1,), None, options=DEFAULT_RECURSIVE)

        self.assertNoMemoryLeaks(test_fn)

    def test_converted_call_no_leaks_via_inner_function_closure(self):

        def test_fn():
            res = TestResource()

            def f(y):

                def inner_f():
                    return res.x + y

                return inner_f

            api.converted_call(f, (1,), None, options=DEFAULT_RECURSIVE)()

        self.assertNoMemoryLeaks(test_fn)

    def test_converted_call_no_caching_on_abort(self):

        def test_fn(needs_autograph):
            if needs_autograph:
                if custom_constant(True):
                    x = custom_constant(1)
                else:
                    x = custom_constant(2)
            else:
                x = 3
            return x

        def call_in_disabled_context():
            with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
                return api.converted_call(
                    test_fn, (False,), None, options=DEFAULT_RECURSIVE)

        def call_in_default_context():
            with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED):
                return api.converted_call(
                    test_fn, (True,), None, options=DEFAULT_RECURSIVE)

        # Note: this is an invariant, not a test (see above).
        assert call_in_disabled_context() == 3

        # If api.convert placed test_fn in the unconverted cache, this second
        # invocation would fail.
        self.assertEqual(self.evaluate(call_in_default_context()), 1)

    def test_converted_call_caching_of_allowlisted_bound_methods(self):

        class TestClass(object):

            def __init__(self):
                self.__private = custom_constant(-1)

            def test_method(self):
                return self.__private

        # TODO(mdan): Refactor to avoid this use of global state.
        cache_size_before = len(conversion._ALLOWLIST_CACHE)

        # First invocation with fallback on, to allow recording it into cache.
        os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '0'
        tc = TestClass()
        api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)
        os.environ['AUTOGRAPH_STRICT_CONVERSION'] = '1'

        # Entry should be added to the allowlist cache.
        self.assertEqual(len(conversion._ALLOWLIST_CACHE), cache_size_before + 1)

        # A second invocation should go through even with fallback off.
        tc = TestClass()
        api.converted_call(tc.test_method, (), None, options=DEFAULT_RECURSIVE)

        # No new entries should appear in the allowlist cache.
        self.assertEqual(len(conversion._ALLOWLIST_CACHE), cache_size_before + 1)

    def test_context_tracking_direct_calls(self):

        @api.do_not_convert()
        def unconverted_fn():
            self.assertEqual(ag_ctx.control_status_ctx().status,
                             ag_ctx.Status.DISABLED)

        @api.convert()
        def converted_fn():
            self.assertEqual(ag_ctx.control_status_ctx().status,
                             ag_ctx.Status.ENABLED)
            unconverted_fn()
            self.assertEqual(ag_ctx.control_status_ctx().status,
                             ag_ctx.Status.ENABLED)

        self.assertEqual(ag_ctx.control_status_ctx().status,
                         ag_ctx.Status.UNSPECIFIED)
        converted_fn()
        self.assertEqual(ag_ctx.control_status_ctx().status,
                         ag_ctx.Status.UNSPECIFIED)

        @api.call_with_unspecified_conversion_status
        def unspecified_fn():
            self.assertEqual(ag_ctx.control_status_ctx().status,
                             ag_ctx.Status.UNSPECIFIED)

        unspecified_fn()

    def test_to_graph_with_defaults(self):

        foo = 4

        def test_fn(x, s=foo):
            while reduce(add, x) > s:
                x //= 2
            return x

        compiled_fn = api.to_graph(test_fn)

        x = compiled_fn(custom_constant([4, 8]))
        self.assertListEqual([1, 2], self.evaluate(x).tolist())

    def test_to_graph_with_globals(self):

        def test_fn(x):
            global global_n
            global_n = x + global_n
            return global_n

        converted_fn = api.to_graph(test_fn)
        prev_val = global_n
        converted_fn(10)
        self.assertGreater(global_n, prev_val)

    def test_to_graph_with_kwargs_clashing_converted_call(self):

        def called_fn(**kwargs):
            return kwargs['f'] + kwargs['owner']

        def test_fn():
            # These arg names intentionally match converted_call's
            return called_fn(f=1, owner=2)

        compiled_fn = api.to_graph(test_fn)

        self.assertEqual(compiled_fn(), 3)

    def test_to_graph_with_kwargs_clashing_unconverted_call(self):

        @api.do_not_convert
        def called_fn(**kwargs):
            return kwargs['f'] + kwargs['owner']

        def test_fn():
            # These arg names intentionally match _call_unconverted's
            return called_fn(f=1, owner=2)

        compiled_fn = api.to_graph(test_fn)

        self.assertEqual(compiled_fn(), 3)

    def test_to_graph_caching(self):

        def test_fn(x):
            if x > 0:
                return x
            else:
                return -x

        converted_functions = tuple(api.to_graph(test_fn) for _ in (-1, 0, 1))

        # All outputs are from the same module. We can't use __module__ because
        # that's reset when we instantiate the function (see conversion.py).
        # TODO(mdan): Can and should we overwrite __module__ instead?
        module_names = frozenset(f.ag_module for f in converted_functions)
        self.assertEqual(len(module_names), 1)
        self.assertNotIn('__main__', module_names)

        self.assertEqual(len(frozenset(id(f) for f in converted_functions)), 3)

    def test_to_graph_caching_different_options(self):

        def called_fn():
            pass

        def test_fn():
            return called_fn()

        converted_recursive = api.to_graph(test_fn, recursive=True)
        converted_non_recursive = api.to_graph(test_fn, recursive=False)

        self.assertNotEqual(converted_recursive.ag_module,
                            converted_non_recursive.ag_module)
        self.assertRegex(
            inspect.getsource(converted_recursive),
            'FunctionScope(.*recursive=True.*)')
        self.assertRegex(
            inspect.getsource(converted_non_recursive),
            'FunctionScope(.*recursive=False.*)')

    def test_to_graph_preserves_bindings(self):
        y = 3

        def test_fn():
            return y

        converted = api.to_graph(test_fn)

        self.assertEqual(converted(), 3)

        y = 7

        self.assertEqual(converted(), 7)

    def test_to_graph_source_map(self):

        def test_fn(y):
            return y**2

        self.assertTrue(hasattr(api.to_graph(test_fn), 'ag_source_map'))

    def test_to_code_basic(self):

        def test_fn(x, s):
            while reduce(add, x) > s:
                x /= 2
            return x

        # Just check that the output is parseable Python code.
        self.assertIsNotNone(parser.parse(api.to_code(test_fn)))

    def test_tf_convert_overrides_current_context(self):

        def f(expect_converted):
            self.assertEqual(converter_testing.is_inside_generated_code(),
                             expect_converted)

        @api.do_not_convert
        def test_fn(ctx, expect_converted):
            return api.tf_convert(f, ctx)(expect_converted)

        test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.ENABLED), True)
        test_fn(ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED), False)

    def test_super_with_one_arg(self):
        test_case_self = self

        class TestBase(object):

            def plus_three(self, x):
                return x + 3

        class TestSubclass(TestBase):

            def plus_three(self, x):
                test_case_self.fail('This should never be called.')

            def one_arg(self, x):
                test_base_unbound = super(TestSubclass)
                test_base = test_base_unbound.__get__(self, TestSubclass)
                return test_base.plus_three(x)

        tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE)

        self.assertEqual(5, tc.one_arg(2))

    def test_super_with_two_args(self):
        test_case_self = self

        class TestBase(object):

            def plus_three(self, x):
                return x + 3

        class TestSubclass(TestBase):

            def plus_three(self, x):
                test_case_self.fail('This should never be called.')

            def two_args(self, x):
                return super(TestSubclass, self).plus_three(x)

        tc = api.converted_call(TestSubclass, (), None, options=DEFAULT_RECURSIVE)

        self.assertEqual(5, tc.two_args(2))
|
DALI-main
|
dali/test/python/autograph/impl/test_api.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/test/python/autograph/core/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for converter module."""
import imp
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.core import converter_testing
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import loader
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import templates
class TestConverter(converter.Base):
    """No-op converter subclass; lets the tests below call converter.Base
    helpers such as get_definition_directive."""
    pass
class ConversionOptionsTest(converter_testing.TestCase):
    """Tests round-tripping ConversionOptions through its AST representation."""

    def test_to_ast(self):
        opts = converter.ConversionOptions()
        opts_ast = opts.to_ast()

        template = '''
            def f():
                return opts_ast
        '''
        opts_packed = templates.replace(template, opts_ast=opts_ast)

        reparsed, _, _ = loader.load_ast(opts_packed)
        # The generated AST references `ag__.ConversionOptions`; provide a fake
        # module with just the names it needs.
        fake_ag = imp.new_module('fake_ag')
        fake_ag.ConversionOptions = converter.ConversionOptions
        fake_ag.Feature = converter.Feature
        reparsed.ag__ = fake_ag

        reparsed_opts = reparsed.f()

        self.assertEqual(opts.recursive, reparsed_opts.recursive)
        self.assertEqual(opts.user_requested, False)
        self.assertEqual(
            opts.internal_convert_user_code,
            reparsed_opts.internal_convert_user_code)
        self.assertEqual(opts.optional_features, reparsed_opts.optional_features)
class ConverterBaseTest(converter_testing.TestCase):
    """Tests for converter.Base.get_definition_directive."""

    def test_get_definition_directive_basic(self):
        # Any hashable value works as a directive key; the class object is used.
        directive_key = object

        def f():
            a = 1
            return a

        _, node, ctx = self.transform(f, (), include_ast=True)

        # node.body[1] is the `return a` statement; its value is the symbol `a`.
        symbol_a = node.body[1].value
        defs, = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
        defs.directives[directive_key] = {
            'test_arg': parser.parse_expression('foo'),
            'other_arg': parser.parse_expression('bar'),
        }
        c = TestConverter(ctx)
        value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
                                           None)
        self.assertEqual(value.id, 'foo')

    def test_get_definition_directive_default(self):
        directive_key = object

        def f():
            a = 1
            return a

        _, node, ctx = self.transform(f, (), include_ast=True)

        symbol_a = node.body[1].value
        c = TestConverter(ctx)
        # No directive was set, so the provided default expression is returned.
        value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
                                           parser.parse_expression('default'))
        self.assertEqual(value.id, 'default')

    def test_get_definition_directive_multiple_consistent(self):
        directive_key = object

        def f():
            a = 1
            if a:
                a = 2
            return a

        _, node, ctx = self.transform(f, (), include_ast=True)

        # node.body[2] is the `return a` statement; `a` has two definitions here.
        symbol_a = node.body[2].value
        defs = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
        defs[0].directives[directive_key] = {
            'test_arg': parser.parse_expression('foo'),
            'other_arg': parser.parse_expression('bar'),
        }
        defs[1].directives[directive_key] = {
            'test_arg': parser.parse_expression('foo'),
            'other_arg': parser.parse_expression('baz'),
        }
        c = TestConverter(ctx)
        # Both definitions agree on 'test_arg', so lookup succeeds.
        value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
                                           None)
        self.assertEqual(value.id, 'foo')

    def test_get_definition_directive_multiple_inconsistent(self):
        directive_key = object

        def f():
            a = 1
            if a:
                a = 2
            return a

        _, node, ctx = self.transform(f, (), include_ast=True)

        symbol_a = node.body[2].value
        defs = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
        defs[0].directives[directive_key] = {
            'test_arg': parser.parse_expression('foo'),
        }
        defs[1].directives[directive_key] = {
            'test_arg': parser.parse_expression('bar'),
        }
        c = TestConverter(ctx)
        # Conflicting directive values across definitions must raise.
        with self.assertRaises(ValueError):
            c.get_definition_directive(symbol_a, directive_key, 'test_arg', None)
|
DALI-main
|
dali/test/python/autograph/core/test_converter.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_structures module."""
import unittest
from nvidia.dali._autograph.operators import data_structures
class ListTest(unittest.TestCase):
    """Tests for the data_structures list operators on plain Python lists."""

    def test_append_python(self):
        l = []
        # The asserts show the second append sees the first one's effect,
        # i.e. list_append returns the updated list.
        self.assertEqual(data_structures.list_append(l, 1), [1])
        self.assertEqual(data_structures.list_append(l, 2), [1, 2])

    def test_pop_python(self):
        l = [1, 2, 3]
        opts = data_structures.ListPopOpts(element_dtype=None, element_shape=())
        # list_pop returns (remaining_list, popped_element).
        self.assertEqual(data_structures.list_pop(l, None, opts), ([1, 2], 3))
        self.assertEqual(data_structures.list_pop(l, None, opts), ([1], 2))
|
DALI-main
|
dali/test/python/autograph/operators/test_data_structures.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for logical module."""
import unittest
from nvidia.dali._autograph.operators import logical
class LogicalOperatorsTest(unittest.TestCase):
    """Tests for the overloaded and_/or_/not_ operators on Python values."""

    def assertNotCalled(self):
        # Passed as a lazy operand that must be short-circuited away.
        self.fail('this should not be called')

    def test_and_python(self):
        self.assertTrue(logical.and_(lambda: True, lambda: True))
        self.assertTrue(logical.and_(lambda: [1], lambda: True))
        # Like Python's `and`, the last evaluated operand is returned as-is.
        self.assertListEqual(logical.and_(lambda: True, lambda: [1]), [1])
        self.assertFalse(logical.and_(lambda: False, lambda: True))
        # Short-circuit: the second operand must not be evaluated.
        self.assertFalse(logical.and_(lambda: False, self.assertNotCalled))

    def test_or_python(self):
        self.assertFalse(logical.or_(lambda: False, lambda: False))
        self.assertFalse(logical.or_(lambda: [], lambda: False))
        # Like Python's `or`, the last evaluated operand is returned as-is.
        self.assertListEqual(logical.or_(lambda: False, lambda: [1]), [1])
        self.assertTrue(logical.or_(lambda: False, lambda: True))
        # Short-circuit: the second operand must not be evaluated.
        self.assertTrue(logical.or_(lambda: True, self.assertNotCalled))

    def test_not_python(self):
        self.assertFalse(logical.not_(True))
        # Truthiness follows Python rules: non-empty is truthy, empty is falsy.
        self.assertFalse(logical.not_([1]))
        self.assertTrue(logical.not_([]))
|
DALI-main
|
dali/test/python/autograph/operators/test_logical.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slices module."""
import unittest
from nvidia.dali._autograph.operators import slices
from nvidia.dali._autograph.utils.all_utils import custom_constant
class SlicesTest(unittest.TestCase):
    """Checks list/string indexing helpers from the ``slices`` module."""

    def test_set_item_tensor_list(self):
        original = custom_constant([[1, 2], [3, 4]])
        updated = slices.set_item(original, 0, [5, 6])
        self.assertEqual(updated, [[5, 6], [3, 4]])

    def test_get_item_tensor_list(self):
        original = custom_constant([[1, 2], [3, 4]])
        element = slices.get_item(original, 1, slices.GetItemOpts(None))
        self.assertEqual(element, [3, 4])

    def test_get_item_tensor_string(self):
        # Indexing a plain string yields a single character...
        char = slices.get_item(custom_constant('abcd'), 1,
                               slices.GetItemOpts(None))
        self.assertEqual(char, 'b')
        # ...while indexing a list of strings yields a whole string.
        word = slices.get_item(custom_constant(['abcd', 'bcde']), 1,
                               slices.GetItemOpts(None))
        self.assertEqual(word, 'bcde')
|
DALI-main
|
dali/test/python/autograph/operators/test_slices.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for exceptions module."""
import unittest
from nvidia.dali._autograph.operators import exceptions
class ExceptionsTest(unittest.TestCase):
    """Checks ``exceptions.assert_stmt``, the runtime form of ``assert``."""

    def test_assert_python_untriggered(self):
        trace = []

        def message_with_side_effects():
            trace.append(object())
            return 'test message'

        # A passing assertion must not evaluate its message expression.
        exceptions.assert_stmt(True, message_with_side_effects)
        self.assertListEqual(trace, [])

    def test_assert_python_triggered(self):
        if not __debug__:
            # Python assertions can only be tested in debug mode; they are
            # compiled out under -O.
            return
        trace = []
        marker = object()

        def message_with_side_effects():
            trace.append(marker)
            return 'test message'

        # A failing assertion evaluates the message exactly once and raises.
        with self.assertRaisesRegex(AssertionError, 'test message'):
            exceptions.assert_stmt(False, message_with_side_effects)
        self.assertListEqual(trace, [marker])
|
DALI-main
|
dali/test/python/autograph/operators/test_exceptions.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/test/python/autograph/operators/__init__.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conditional_expressions module."""
import unittest
from nvidia.dali._autograph.operators import conditional_expressions
def _basic_expr(cond):
    """Builds and evaluates the conditional expression ``1 if cond else 2``."""
    on_true = lambda: 1
    on_false = lambda: 2
    return conditional_expressions.if_exp(cond, on_true, on_false, 'cond')
class IfExpTest(unittest.TestCase):
    """Checks ``conditional_expressions.if_exp``, the functional ternary."""

    def test_python(self):
        one = lambda: 1
        two = lambda: 2
        # Via the helper, which names its condition expression 'cond'.
        self.assertEqual(_basic_expr(True), 1)
        self.assertEqual(_basic_expr(False), 2)
        # Direct calls, with an empty condition repr.
        self.assertEqual(
            conditional_expressions.if_exp(True, one, two, ''), 1)
        self.assertEqual(
            conditional_expressions.if_exp(False, one, two, ''), 2)
|
DALI-main
|
dali/test/python/autograph/operators/test_conditional_expressions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for control_flow module."""
# Unfortunately pylint has false positives when nonlocal is present.
# pylint:disable=unused-variable
import unittest
from nvidia.dali._autograph.operators import control_flow
class ForLoopTest(unittest.TestCase):
    """Tests for control_flow.for_stmt, the functional form of a `for` loop."""

    def test_python(self):
        # Accumulates the loop index into a decimal number; iterating over
        # 0..4 yields 1234 (the leading 0 contributes nothing).

        def body(i):
            nonlocal s
            s = s * 10 + i

        def set_state(loop_vars):
            nonlocal s
            s, = loop_vars

        s = 0
        control_flow.for_stmt(
            range(5),
            extra_test=lambda: True,
            body=body,
            get_state=lambda: (s,),
            set_state=set_state,
            symbol_names=('s',),
            opts={})
        self.assertEqual(s, 1234)

    def test_python_generator_with_extra_test(self):
        # The generator is shared across run_loop calls; each call must
        # consume exactly one element, because extra_test stops the loop
        # after the first iteration.

        def new_generator():
            for i in range(1, 5):
                yield i

        gen = new_generator()

        def run_loop():
            # Returns (last element consumed, iteration count).
            s = 0
            c = 0

            def body(i):
                nonlocal s, c
                s = s * 10 + i
                c += 1

            control_flow.for_stmt(
                gen,
                extra_test=lambda: c == 0,  # Break after first iteration
                body=body,
                get_state=None,
                set_state=None,
                symbol_names=('s', 'c'),
                opts={})
            return s, c

        self.assertEqual(run_loop(), (1, 1))
        self.assertEqual(run_loop(), (2, 1))
        self.assertEqual(run_loop(), (3, 1))
        # Exactly one element was consumed per call; 4 is still pending.
        self.assertEqual(next(gen), 4)

    def test_python_generator_with_extra_test_no_iterations(self):
        # extra_test is False from the start, so the loop body never runs
        # and the generator must not be advanced at all.

        def new_generator():
            for i in range(5):
                yield i

        gen = new_generator()

        def run_loop():
            s = 0

            def body(i):
                nonlocal s
                s = s * 10 + i

            control_flow.for_stmt(
                gen,
                extra_test=lambda: False,  # Break before loop
                body=body,
                get_state=None,
                set_state=None,
                symbol_names=('s',),
                opts={})
            return s

        self.assertEqual(run_loop(), 0)
        self.assertEqual(run_loop(), 0)
        self.assertEqual(next(gen), 0)
class WhileLoopTest(unittest.TestCase):
    """Tests for control_flow.while_stmt, the functional form of a `while` loop."""

    def test_python(self):
        counter = 0
        digits = 0
        limit = 5

        def body():
            # Fold the counter into a decimal accumulator: 0..4 -> 1234.
            nonlocal counter, digits
            digits = digits * 10 + counter
            counter += 1

        control_flow.while_stmt(
            test=lambda: counter < limit,
            body=body,
            get_state=None,
            set_state=None,
            symbol_names=('i', 's'),
            opts={})
        self.assertEqual(digits, 1234)

    def test_python_with_tensor_state(self):
        # Same loop as above; additionally verifies the final counter value.
        counter = 0
        digits = 0
        limit = 5

        def body():
            nonlocal counter, digits
            digits = digits * 10 + counter
            counter += 1

        control_flow.while_stmt(
            test=lambda: counter < limit,
            body=body,
            get_state=None,
            set_state=None,
            symbol_names=('i', 's'),
            opts={})
        self.assertEqual(counter, 5)
        self.assertEqual(digits, 1234)
class IfStmtTest(unittest.TestCase):
    """Tests for control_flow.if_stmt, the functional form of an `if` statement."""

    def test_python(self):
        def test_fn(cond):
            def body():
                nonlocal i
                i = 1

            def orelse():
                nonlocal i
                i = -1

            # i is pre-assigned None; exactly one branch overwrites it.
            i = None
            control_flow.if_stmt(
                cond=cond,
                body=body,
                orelse=orelse,
                get_state=None,
                set_state=None,
                symbol_names=('i',),
                nouts=1)
            return i

        self.assertEqual(test_fn(True), 1)
        self.assertEqual(test_fn(False), -1)

    def test_python_multiple_returns(self):
        def test_fn(cond):
            def body():
                nonlocal i, j
                i = 1
                j = 2

            def orelse():
                nonlocal i, j
                i = -1
                j = -2

            i, j = None, None
            # nouts=2: both i and j are outputs of the conditional.
            control_flow.if_stmt(
                cond=cond,
                body=body,
                orelse=orelse,
                get_state=None,
                set_state=None,
                symbol_names=('i', 'j'),
                nouts=2)
            return i, j

        self.assertEqual(test_fn(True), (1, 2))
        self.assertEqual(test_fn(False), (-1, -2))
|
DALI-main
|
dali/test/python/autograph/operators/test_control_flow.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_builtins module."""
import unittest
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.core import function_wrappers
from nvidia.dali._autograph.operators import py_builtins
class TestBase(object):
    """Base class whose method subclasses override in the tests below.

    The distinctive +20 offset makes it easy to verify that super() dispatch
    resolved back to this implementation rather than an override.
    """

    def overridden_method(self, x):
        """Returns ``x + 20``; must be the implementation reached via super()."""
        return x + 20
class PyBuiltinsTest(unittest.TestCase):
    """Tests for the builtin overloads in ``py_builtins``.

    The eval/locals/globals/super tests are frame-sensitive: they depend on
    the exact nesting of function scopes, so the helper structure must not
    be refactored casually.
    """

    def test_abs(self):
        self.assertEqual(py_builtins.abs_(-1), 1)

    def test_float(self):
        self.assertEqual(py_builtins.float_(10), 10.0)
        self.assertEqual(py_builtins.float_('10.0'), 10.0)

    def test_int(self):
        self.assertEqual(py_builtins.int_(10.0), 10)
        self.assertEqual(py_builtins.int_('11', 2), 3)  # '11' in base 2

    def test_int_unsupported_base(self):
        # A base argument is only valid for string input, mirroring int().
        t = 1.0
        with self.assertRaises(TypeError):
            py_builtins.int_(t, 2)

    def test_len(self):
        self.assertEqual(py_builtins.len_([1, 2, 3]), 3)

    def test_len_scalar(self):
        with self.assertRaises(TypeError):
            py_builtins.len_(1)

    def test_max(self):
        self.assertEqual(py_builtins.max_([1, 3, 2]), 3)
        self.assertEqual(py_builtins.max_(0, 2, 1), 2)

    def test_min(self):
        self.assertEqual(py_builtins.min_([2, 1, 3]), 1)
        self.assertEqual(py_builtins.min_(2, 0, 1), 0)

    def test_range(self):
        self.assertListEqual(list(py_builtins.range_(3)), [0, 1, 2])
        self.assertListEqual(list(py_builtins.range_(1, 3)), [1, 2])
        self.assertListEqual(list(py_builtins.range_(2, 0, -1)), [2, 1])

    def test_enumerate(self):
        self.assertListEqual(
            list(py_builtins.enumerate_([3, 2, 1])), [(0, 3), (1, 2), (2, 1)])
        self.assertListEqual(
            list(py_builtins.enumerate_([3, 2, 1], 5)), [(5, 3), (6, 2), (7, 1)])
        self.assertListEqual(list(py_builtins.enumerate_([-8], -3)), [(-3, -8)])

    def test_zip(self):
        self.assertListEqual(
            list(py_builtins.zip_([3, 2, 1], [1, 2, 3])), [(3, 1), (2, 2), (1, 3)])
        # Like builtin zip, stops at the shortest input.
        self.assertListEqual(
            list(py_builtins.zip_([4, 5, 6], [-1, -2])), [(4, -1), (5, -2)])

    def test_map(self):
        def increment(x):
            return x + 1

        add_list = lambda x, y: x + y
        self.assertListEqual(
            list(py_builtins.map_(increment, [4, 5, 6])), [5, 6, 7])
        self.assertListEqual(
            list(py_builtins.map_(add_list, [3, 2, 1], [-1, -2, -3])), [2, 0, -2])

    def test_next_normal(self):
        iterator = iter([1, 2, 3])
        self.assertEqual(py_builtins.next_(iterator), 1)
        self.assertEqual(py_builtins.next_(iterator), 2)
        self.assertEqual(py_builtins.next_(iterator), 3)
        with self.assertRaises(StopIteration):
            py_builtins.next_(iterator)
        # With a default, exhaustion returns the default instead of raising.
        self.assertEqual(py_builtins.next_(iterator, 4), 4)

    def _basic_function_scope(self):
        # Creates the FunctionScope that the *_in_original_context helpers
        # use to locate the caller's frame.
        return function_wrappers.FunctionScope(
            'test_function_name',
            'test_scope',  # Note: this must match the name in the `with` statement.
            converter.ConversionOptions())

    def test_eval_in_original_context(self):
        def test_fn():
            l = 1  # pylint:disable=unused-variable # noqa: F841
            with self._basic_function_scope() as test_scope:
                # Must see the caller's `l`.
                return py_builtins.eval_in_original_context(eval, ('l',), test_scope)

        self.assertEqual(test_fn(), 1)

    def test_eval_in_original_context_inner_function(self):
        def test_fn():
            l = 1  # pylint:disable=unused-variable # noqa: F841
            with self._basic_function_scope() as test_scope:
                def inner_fn():
                    # Note: a user function without a top-level function scope should
                    # never be found in user code; it's only possible in generated code.
                    l = 2  # pylint:disable=unused-variable # noqa: F841
                    return py_builtins.eval_in_original_context(eval, ('l',), test_scope)

                return inner_fn()

        self.assertEqual(test_fn(), 2)

    def test_locals_in_original_context(self):
        def test_fn():
            l = 1  # pylint:disable=unused-variable # noqa: F841
            with self._basic_function_scope() as test_scope:
                return py_builtins.locals_in_original_context(test_scope)

        locs = test_fn()
        self.assertEqual(locs['l'], 1)

    def test_locals_in_original_context_inner_function(self):
        def test_fn():
            l = 1  # pylint:disable=unused-variable # noqa: F841
            with self._basic_function_scope() as test_scope:
                def inner_fn():
                    # Note: a user function without a top-level function scope should
                    # never be found in user code; it's only possible in generated code.
                    l = 2  # pylint:disable=unused-variable # noqa: F841
                    return py_builtins.locals_in_original_context(test_scope)

                return inner_fn()

        locs = test_fn()
        self.assertEqual(locs['l'], 2)

    def test_globals_in_original_context(self):
        def test_fn():
            with self._basic_function_scope() as test_scope:
                return py_builtins.globals_in_original_context(test_scope)

        globs = test_fn()
        # The module-level TestBase must be visible through the scope.
        self.assertIs(globs['TestBase'], TestBase)

    def test_globals_in_original_context_inner_function(self):
        def test_fn():
            with self._basic_function_scope() as test_scope:
                def inner_fn():
                    # Note: a user function without a top-level function scope should
                    # never be found in user code; it's only possible in generated code.
                    return py_builtins.globals_in_original_context(test_scope)

                return inner_fn()

        globs = test_fn()
        self.assertIs(globs['TestBase'], TestBase)

    def test_super_in_original_context_unary_call(self):
        test_case_self = self

        class TestSubclass(TestBase):
            def overridden_method(self, x):
                test_case_self.fail('This should never be called.')

            def test_method(self):
                with test_case_self._basic_function_scope() as test_scope:
                    # super(TestSubclass) is unbound; bind it to self manually.
                    test_base_unbound = py_builtins.super_in_original_context(
                        super, (TestSubclass,), test_scope)
                    test_base = test_base_unbound.__get__(self, TestSubclass)
                    return test_base.overridden_method(1)

        tc = TestSubclass()
        # 21 == 1 + 20: TestBase's implementation ran, not the override.
        self.assertEqual(tc.test_method(), 21)

    def test_super_in_original_context_binary_call(self):
        test_case_self = self

        class TestSubclass(TestBase):
            def overridden_method(self, x):
                test_case_self.fail('This should never be called.')

            def test_method(self):
                with test_case_self._basic_function_scope() as test_scope:
                    test_base = py_builtins.super_in_original_context(
                        super, (TestSubclass, self), test_scope)
                    return test_base.overridden_method(1)

        tc = TestSubclass()
        self.assertEqual(tc.test_method(), 21)

    def test_super_in_original_context_niladic_call(self):
        test_case_self = self

        class TestSubclass(TestBase):
            def overridden_method(self, x):
                test_case_self.fail('This should never be called.')

            def test_method(self):
                with test_case_self._basic_function_scope() as test_scope:
                    # Zero-argument super(), resolved from the original frame.
                    b = py_builtins.super_in_original_context(super, (), test_scope)
                    return b.overridden_method(1)

        tc = TestSubclass()
        self.assertEqual(tc.test_method(), 21)

    def test_super_in_original_context_caller_with_locals(self):
        test_case_self = self

        class TestSubclass(TestBase):
            def overridden_method(self, x):
                test_case_self.fail('This should never be called.')

            def test_method(self, x):
                # Extra locals must not confuse frame inspection; y - z == 0.
                y = 7
                with test_case_self._basic_function_scope() as test_scope:
                    z = 7
                    return py_builtins.super_in_original_context(
                        super, (), test_scope).overridden_method(x + y - z)

        tc = TestSubclass()
        self.assertEqual(tc.test_method(1), 21)

    def test_super_in_original_context_inner_function(self):
        test_case_self = self

        class TestSubclass(TestBase):
            def overridden_method(self, x):
                test_case_self.fail('This should never be called.')

            def test_method(self, x):
                with test_case_self._basic_function_scope() as test_scope:
                    # Oddly, it's sufficient to use `self` in an inner function
                    # to gain access to __class__ in this scope.
                    # TODO(mdan): Is this true across implementations?
                    # Note: normally, it's illegal to use super() in inner functions (it
                    # throws an error), but the generated code may create them.
                    def inner_fn():
                        return py_builtins.super_in_original_context(
                            super, (), test_scope).overridden_method(x)

                    return inner_fn()

        tc = TestSubclass()
        self.assertEqual(tc.test_method(1), 21)

    def test_super_in_original_context_inner_lambda(self):
        test_case_self = self

        class TestSubclass(TestBase):
            def overridden_method(self, x):
                test_case_self.fail('This should never be called.')

            def test_method(self, x):
                with test_case_self._basic_function_scope() as test_scope:
                    # Oddly, it's sufficient to use `self` in an inner function
                    # to gain access to __class__ in this scope.
                    # TODO(mdan): Is this true across implementations?
                    # Note: normally, it's illegal to use super() in inner functions (it
                    # throws an error), but the generated code may create them.
                    l = lambda: py_builtins.super_in_original_context(  # pylint:disable=g-long-lambda
                        super, (), test_scope).overridden_method(x)
                    return l()

        tc = TestSubclass()
        self.assertEqual(tc.test_method(1), 21)

    def test_filter(self):
        self.assertListEqual(
            list(py_builtins.filter_(lambda x: x == 'b', ['a', 'b', 'c'])), ['b'])
        self.assertListEqual(
            list(py_builtins.filter_(lambda x: x < 3, [3, 2, 1])), [2, 1])

    def test_any(self):
        self.assertEqual(py_builtins.any_([False, True, False]), True)
        self.assertEqual(py_builtins.any_([False, False, False]), False)

    def test_all(self):
        self.assertEqual(py_builtins.all_([False, True, False]), False)
        self.assertEqual(py_builtins.all_([True, True, True]), True)

    def test_sorted(self):
        self.assertListEqual(py_builtins.sorted_([2, 3, 1]), [1, 2, 3])
        self.assertListEqual(
            py_builtins.sorted_([2, 3, 1], key=lambda x: -x), [3, 2, 1])
        self.assertListEqual(
            py_builtins.sorted_([2, 3, 1], reverse=True), [3, 2, 1])
        # key and reverse compose (double inversion restores the order).
        self.assertListEqual(
            py_builtins.sorted_([2, 3, 1], key=lambda x: -x, reverse=True),
            [1, 2, 3])
        self.assertEqual(
            py_builtins.sorted_([[4, 3], [2, 1]], key=lambda x: sum(x)),
            [[2, 1], [4, 3]])
|
DALI-main
|
dali/test/python/autograph/operators/test_py_builtins.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python_lang_utils module."""
import unittest
from nvidia.dali._autograph.operators import variables
class SpecialValuesTest(unittest.TestCase):
    """Tests the Undefined placeholder and the ``ld`` (load) helper."""

    def test_undefined(self):
        first = variables.Undefined('name')
        second = variables.Undefined('name')
        # Each Undefined is a distinct sentinel, even for the same symbol name.
        self.assertEqual(first.symbol_name, 'name')
        self.assertEqual(second.symbol_name, 'name')
        self.assertNotEqual(first, second)

    def test_undefined_operations(self):
        sentinel = variables.Undefined('name')
        # Attribute access and indexing propagate the undefined status...
        self.assertIsInstance(sentinel.foo, variables.Undefined)
        self.assertIsInstance(sentinel[0], variables.Undefined)
        # ...but dunder lookups do not.
        self.assertNotIsInstance(sentinel.__class__, variables.Undefined)

    def test_read(self):
        # ld is the identity for ordinary values, including None.
        self.assertEqual(variables.ld(1), 1)
        obj = object()
        self.assertEqual(variables.ld(obj), obj)
        self.assertIsNone(variables.ld(None))

    def test_read_undefined(self):
        # Reading an undefined symbol mimics Python's unbound-local error.
        with self.assertRaisesRegex(UnboundLocalError, 'used before assignment'):
            variables.ld(variables.Undefined('a'))
|
DALI-main
|
dali/test/python/autograph/operators/test_variables.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for logical_expressions module."""
from nvidia.dali._autograph.converters import logical_expressions
from nvidia.dali._autograph.core import converter_testing
class LogicalExpressionTest(converter_testing.TestCase):
    """Tests the logical_expressions converter on plain Python values."""

    def _converted(self, fn):
        # Applies only the logical_expressions converter to fn.
        return self.transform(fn, logical_expressions)

    def test_equals(self):
        def f(a, b):
            return a == b

        converted = self._converted(f)
        self.assertTrue(converted(1, 1))
        self.assertFalse(converted(1, 2))

    def test_bool_ops(self):
        def f(a, b, c):
            return (a or b) and (a or b or c) and not c

        converted = self._converted(f)
        self.assertTrue(converted(True, False, False))
        self.assertFalse(converted(True, False, True))

    def test_comparison(self):
        def f(a, b, c, d):
            return a < b == c > d

        converted = self._converted(f)
        # Chained comparison: equivalent to (a < b) and (b == c) and (c > d),
        # evaluated left to right with short-circuiting preserved.
        self.assertTrue(converted(1, 2, 2, 1))
        self.assertFalse(converted(1, 2, 2, 3))

    def test_default_ops(self):
        # Operators without a dedicated overload keep their default meaning.
        def f(a, b):
            return a in b

        converted = self._converted(f)
        self.assertTrue(converted('a', ('a',)))

    def test_unary_ops(self):
        def f(a):
            return ~a, -a, +a

        converted = self._converted(f)
        self.assertEqual(converted(1), (-2, -1, 1))
|
DALI-main
|
dali/test/python/autograph/converters/test_logical_expressions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for break_statements module."""
from nvidia.dali._autograph.converters import break_statements
from nvidia.dali._autograph.core import converter_testing
from nvidia.dali._autograph.pyct import anno
class BreakCanonicalizationTest(converter_testing.TestCase):
    """Tests the break_statements converter, which rewrites `break` into a
    guard variable checked by the rest of the loop body."""

    def assertTransformedEquivalent(self, f, *inputs):
        # The transformed function must behave exactly like the original.
        tr = self.transform(f, break_statements)
        self.assertEqual(f(*inputs), tr(*inputs))

    def test_while_loop(self):
        def f(x):
            v = []
            while x > 0:
                x -= 1
                if x % 2 == 0:
                    break
                v.append(x)
            return v

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 1)
        self.assertTransformedEquivalent(f, 4)

    def test_while_loop_preserves_directives(self):
        def f(x):
            while x > 0:
                x -= 1
                if x % 2 == 0:
                    break

        _, node, ctx = self.transform(f, (), include_ast=True)
        fake_annotation = object()
        anno.setanno(node.body[0], anno.Basic.DIRECTIVES, fake_annotation)
        node = break_statements.transform(node, ctx)
        # After the transform the loop is no longer the first statement
        # (presumably a break-flag initializer is inserted before it), but
        # its directive annotations must be carried over to body[1].
        self.assertIs(
            anno.getanno(node.body[1], anno.Basic.DIRECTIVES), fake_annotation)

    def test_for_loop(self):
        def f(a):
            v = []
            for x in a:
                x -= 1
                if x % 2 == 0:
                    break
                v.append(x)
            return v

        tr = self.transform(f, break_statements)
        # NOTE(review): the transformed function differs from f here: f([5, 4])
        # would return [] (breaks at 5 -> 4), while tr returns [3]. The break is
        # only partially canonicalized for `for` loops by this converter alone
        # (the guard skips the rest of the body, but the plain Python loop
        # itself does not stop early) — confirm against the converter.
        self.assertEqual([3], tr([5, 4]))

    def test_for_loop_preserves_directives(self):
        def f(a):
            for x in a:
                if x % 2 == 0:
                    break

        _, node, ctx = self.transform(f, (), include_ast=True)
        fake_annotation = object()
        anno.setanno(node.body[0], anno.Basic.DIRECTIVES, fake_annotation)
        node = break_statements.transform(node, ctx)
        # Same index shift as in the while-loop variant above.
        self.assertIs(
            anno.getanno(node.body[1], anno.Basic.DIRECTIVES), fake_annotation)

    def test_nested(self):
        # A break nested inside conditionals within a single loop.
        def f(x):
            v = []
            u = []
            w = []
            while x > 0:
                x -= 1
                if x % 2 == 0:
                    if x % 3 != 0:
                        u.append(x)
                    else:
                        w.append(x)
                        break
                v.append(x)
            return v, u, w

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 3)
        self.assertTransformedEquivalent(f, 11)

    def test_nested_loops(self):
        # Each loop level needs its own independent break guard.
        def f(x):
            v = []
            u = []
            while x > 0:
                x -= 1
                y = x
                while y > 0:
                    y -= 1
                    if y % 2 == 0:
                        break
                    u.append(y)
                if x == 0:
                    break
                v.append(x)
            return v, u

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 2)
        self.assertTransformedEquivalent(f, 3)
        self.assertTransformedEquivalent(f, 5)

    def test_loop_orelse(self):
        # The inner while's `else` clause runs only when that loop exits
        # without breaking; the transform must preserve this.
        def f(x):
            v = []
            u = []
            while x > 0:
                x -= 1
                y = x
                while y > 1:
                    break
                else:
                    u.append(y)
                break
            v.append(x)
            return v, u

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 2)
        self.assertTransformedEquivalent(f, 3)

    def test_multiple_correlated_breaks_with_side_effects(self):
        def f(cond1):
            lst = []
            while True:
                if cond1:
                    lst.append(1)
                else:
                    break
                if lst[-1] > 0:  # lst always has an element here
                    break
            return lst

        self.assertTransformedEquivalent(f, True)
        self.assertTransformedEquivalent(f, False)
|
DALI-main
|
dali/test/python/autograph/converters/test_break_statements.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for directives module."""
from nvidia.dali._autograph.converters import directives as directives_converter
from nvidia.dali._autograph.core import converter_testing
from nvidia.dali._autograph.lang import directives
from nvidia.dali._autograph.pyct import anno
class DirectivesTest(converter_testing.TestCase):
    """Tests the directives converter, which lifts calls such as
    directives.set_element_type / set_loop_options into AST annotations."""

    def test_local_target(self):
        def f():
            l = []
            string_var = 0
            directives.set_element_type(l, 'a', string_var)

        _, node, _ = self.transform(f, directives_converter, include_ast=True)

        # The directive is recorded on the definition of its first argument.
        def_, = anno.getanno(node.body[0].targets[0],
                             anno.Static.DEFINITIONS)
        d = def_.directives[directives.set_element_type]
        self.assertEqual(d['dtype'].value, 'a')
        # Non-literal arguments are kept as AST Name nodes.
        self.assertEqual(d['shape'].id, 'string_var')

    def test_argument_target(self):
        def f(a):
            directives.set_element_type(a, 1, shape=2)
            pass

        _, node, _ = self.transform(f, directives_converter, include_ast=True)

        # For a parameter target, the annotation lands on the arg node.
        def_, = anno.getanno(node.args.args[0], anno.Static.DEFINITIONS)
        d = def_.directives[directives.set_element_type]
        self.assertEqual(d['dtype'].value, 1)
        self.assertEqual(d['shape'].value, 2)

    def test_loop_target(self):
        def f():
            a = True  # noqa: F841
            while True:
                directives.set_loop_options(parallel_iterations=10)
                pass

        _, node, _ = self.transform(f, directives_converter, include_ast=True)

        # Loop options are attached to the enclosing loop statement.
        d = anno.getanno(node.body[1], anno.Basic.DIRECTIVES)
        d = d[directives.set_loop_options]
        self.assertEqual(d['parallel_iterations'].value, 10)
        # Unspecified options are simply absent from the mapping.
        self.assertNotIn('swap_memory', d)

    def test_loop_target_no_loop(self):
        def f():
            directives.set_loop_options()
            pass

        with self.assertRaisesRegex(ValueError, 'must be used inside a statement'):
            self.transform(f, directives_converter, include_ast=True)

    def test_loop_target_not_first(self):
        def f():
            a = 1
            while True:
                a = 2
                directives.set_loop_options(parallel_iterations=10, back_prop=a)  # pylint: disable=unexpected-keyword-arg # noqa: E501

        with self.assertRaisesRegex(ValueError, 'must be the first statement'):
            self.transform(f, directives_converter, include_ast=True)

    def test_value_verification_does_not_trigger_properties(self):
        self_test = self

        class TestClass(object):
            @property
            def b(self):
                self_test.fail('This should never be evaluated')

        tc = TestClass()

        def f():
            return tc.b + 1

        # Static verification of directive values must not execute user
        # code such as properties.
        _, node, _ = self.transform(f, directives_converter, include_ast=True)
        self.assertIsNotNone(node)

    def test_value_verification_does_not_trigger_getattr(self):
        class TestClass(object):
            def __init__(self):
                self.getattr_called = False

            def __getattr__(self, _):
                # Note: seems that any exception raised here is absorbed by hasattr.
                # So we can't call test.fail or raise.
                self.getattr_called = True

        tc = TestClass()

        def f():
            return tc.b + 1

        _, node, _ = self.transform(f, directives_converter, include_ast=True)
        self.assertIsNotNone(node)
        self.assertFalse(tc.getattr_called)
|
DALI-main
|
dali/test/python/autograph/converters/test_directives.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slices module."""
from nvidia.dali._autograph.converters import directives as directives_converter
from nvidia.dali._autograph.converters import slices
from nvidia.dali._autograph.core import converter_testing
class SliceTest(converter_testing.TestCase):
    """Tests the slices converter (combined with the directives converter)."""

    def test_index_access(self):
        def f(l):
            return l[1]

        converted = self.transform(f, (directives_converter, slices))
        self.assertEqual(2, converted([1, 2]))

    def test_index_access_multiple_definitions(self):
        def f(l):
            if l:
                l = []
            return l[1]

        # Only checks that conversion succeeds when the target has more than
        # one reaching definition; the converted function is not executed.
        self.transform(f, (directives_converter, slices))
|
DALI-main
|
dali/test/python/autograph/converters/test_slices.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for list_comprehensions module."""
from nvidia.dali._autograph.converters import list_comprehensions
from nvidia.dali._autograph.core import converter_testing
class ListCompTest(converter_testing.TestCase):
    """Tests the list_comprehensions converter."""

    def assertTransformedEquivalent(self, f, *inputs):
        # The converted function must produce exactly the original's result.
        converted = self.transform(f, list_comprehensions)
        self.assertEqual(f(*inputs), converted(*inputs))

    def test_basic(self):
        def f(l):
            s = [e * e for e in l]
            return s

        for sample in ([], [1, 2, 3]):
            self.assertTransformedEquivalent(f, sample)

    def test_multiple_generators(self):
        def f(l):
            s = [e * e for sublist in l for e in sublist]  # pylint:disable=g-complex-comprehension
            return s

        for sample in ([], [[1], [2], [3]]):
            self.assertTransformedEquivalent(f, sample)

    def test_cond(self):
        def f(l):
            s = [e * e for e in l if e > 1]
            return s

        for sample in ([], [1, 2, 3]):
            self.assertTransformedEquivalent(f, sample)
|
DALI-main
|
dali/test/python/autograph/converters/test_list_comprehensions.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/test/python/autograph/converters/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for continue_statements module."""
from nvidia.dali._autograph.converters import continue_statements
from nvidia.dali._autograph.core import converter_testing
class ContinueCanonicalizationTest(converter_testing.TestCase):
    """Tests lowering of `continue` statements.

    Each test converts a function containing `continue` and checks the
    converted version against the original for a range of inputs.
    """

    def assertTransformedEquivalent(self, f, *inputs):
        # Converts f and asserts identical results for the given inputs.
        tr = self.transform(f, continue_statements)
        self.assertEqual(f(*inputs), tr(*inputs))

    def test_basic(self):
        def f(x):
            v = []
            while x > 0:
                x -= 1
                if x % 2 == 0:
                    continue
                v.append(x)
            return v

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 1)
        self.assertTransformedEquivalent(f, 3)
        self.assertTransformedEquivalent(f, 4)

    def test_multiple_continues(self):
        def f(x):
            v = []
            while x > 0:
                x -= 1
                if x > 1:
                    continue
                if x > 2:
                    continue
                v.append(x)
            return v

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 1)
        self.assertTransformedEquivalent(f, 3)
        self.assertTransformedEquivalent(f, 4)

    def test_multiple_continues_in_nested_scope(self):
        def f(a):
            v = []
            for x in a:
                x -= 1
                if x > 100:
                    continue
                try:
                    raise ValueError('intentional')
                except ValueError:
                    continue
                v.append(x)
            return v

        self.assertTransformedEquivalent(f, [])
        self.assertTransformedEquivalent(f, [1])
        self.assertTransformedEquivalent(f, [2])
        self.assertTransformedEquivalent(f, [1, 2, 3])

    def test_for_loop(self):
        def f(a):
            v = []
            for x in a:
                x -= 1
                if x % 2 == 0:
                    continue
                v.append(x)
            return v

        self.assertTransformedEquivalent(f, [])
        self.assertTransformedEquivalent(f, [1])
        self.assertTransformedEquivalent(f, [2])
        self.assertTransformedEquivalent(f, [1, 2, 3])

    # NOTE(review): the "*_with*" test names below suggest `with` blocks that
    # are not present in these bodies — presumably simplified when the tests
    # were ported from TensorFlow; TODO confirm against the upstream suite.
    def test_nested_with(self):
        def f(x):
            v = []
            while x > 0:
                x -= 1
                if x % 2 == 0:
                    continue
                v.append(x)
            return v

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 1)
        self.assertTransformedEquivalent(f, 3)
        self.assertTransformedEquivalent(f, 4)

    def test_nested_multiple_withs(self):
        def f(x):
            v = []
            while x > 0:
                x -= 1
                if x % 2 == 0:
                    continue
                v.append(x)
                v.append(x)
            return v

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 1)
        self.assertTransformedEquivalent(f, 3)
        self.assertTransformedEquivalent(f, 4)

    def test_nested_multiple_withs_and_statements(self):
        def f(x):
            v = []
            while x > 0:
                x -= 1
                if x % 2 == 0:
                    continue
                v.append(x)
                v.append(x)
                v.append(x)
                v.append(x)
            return v

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 1)
        self.assertTransformedEquivalent(f, 3)
        self.assertTransformedEquivalent(f, 4)

    def test_nested_multiple_withs_and_nested_withs(self):
        def f(x):
            v = []
            while x > 0:
                x -= 1
                if x % 2 == 0:
                    continue
                v.append(x)
                v.append(x)
                v.append(x)
                v.append(x)
            return v

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 1)
        self.assertTransformedEquivalent(f, 3)
        self.assertTransformedEquivalent(f, 4)

    def test_nested(self):
        def f(x):
            v = []
            u = []
            w = []
            while x > 0:
                x -= 1
                if x % 2 == 0:
                    if x % 3 != 0:
                        u.append(x)
                    else:
                        w.append(x)
                    continue
                v.append(x)
            return v, u, w

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 1)
        self.assertTransformedEquivalent(f, 3)
        self.assertTransformedEquivalent(f, 4)

    def test_multiple_guarded_continues_with_side_effects(self):
        def f(x):
            # track records evaluation order so the test can detect whether
            # the lowered continues short-circuit guard evaluation correctly.
            def track(u, x):
                u.append(x)
                return x

            u = []
            v = []
            while x > 0:
                x -= 1
                if track(u, x) > 1:
                    continue
                if track(u, x) > 2:
                    continue
                v.append(x)
            return u, v

        self.assertTransformedEquivalent(f, 3)
        self.assertTransformedEquivalent(f, 2)
|
DALI-main
|
dali/test/python/autograph/converters/test_continue_statements.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lists module."""
from nvidia.dali._autograph.converters import directives as directives_converter
from nvidia.dali._autograph.converters import lists
from nvidia.dali._autograph.core import converter_testing
from nvidia.dali._autograph.utils import hooks
class TestList(list):
    """List subclass used to mark lists created through the overload hooks."""
    pass
class OperatorList(hooks.OperatorBase):
    """Operator-overload hooks that route list operations through TestList.

    Used by the lists-converter tests to verify that list construction,
    append and pop are dispatched through the overload mechanism.
    """

    def is_test_list(self, iterable):
        # Overloads (other than list_new) apply only to TestList instances.
        return isinstance(iterable, TestList)

    def detect_overload_list_new(self, iterable):
        # Always intercept list literals so they are created as TestList.
        return True

    def list_new(self, iterable):
        return TestList(iterable)

    def detect_overload_list_append(self, list_):
        return self.is_test_list(list_)

    def list_append(self, list_, x):
        # Appends in place, then returns a fresh TestList copy — the converter
        # treats the return value as the new list object.
        # (Previously the list was redundantly re-wrapped twice via an
        # intermediate `list_ = TestList(list_)` rebinding; one copy suffices.)
        list_.append(x)
        return TestList(list_)

    def detect_overload_list_pop(self, list_):
        return self.is_test_list(list_)

    def list_pop(self, list_, i):
        """Pops element i (or the last element when i is None).

        Returns:
            A (list, popped_value) pair, as required by the overload protocol.
        """
        if i is None:
            x = list_.pop()
        else:
            x = list_.pop(i)
        return list_, x
class ListTest(converter_testing.TestCase):
    """Tests the lists converter with the OperatorList overload hooks."""

    def test_empty_list(self):
        def f():
            return []

        tr = self.transform(f, lists, operator_overload=OperatorList())
        tl = tr()
        # Empty tensor lists cannot be evaluated or stacked.
        self.assertIsInstance(tl, TestList)

    def test_initialized_list(self):
        def f():
            return [1, 2, 3]

        tr = self.transform(f, lists, operator_overload=OperatorList())
        tl = tr()
        self.assertIsInstance(tl, TestList)
        self.assertEqual(tl, [1, 2, 3])

    def test_list_append(self):
        def f():
            l = TestList([1])
            l.append(2)
            l.append(3)
            return l

        tr = self.transform(f, lists, operator_overload=OperatorList())
        tl = tr()
        self.assertIsInstance(tl, TestList)
        self.assertEqual(tl, [1, 2, 3])

    def test_list_pop(self):
        def f():
            l = TestList([1, 2, 3])
            s = l.pop()
            return s, l

        # The directives converter is needed so pop() is recognized.
        tr = self.transform(f, (directives_converter, lists),
                            operator_overload=OperatorList())
        ts, tl = tr()
        self.assertIsInstance(tl, TestList)
        self.assertEqual(tl, [1, 2])
        self.assertEqual(ts, 3)

    def test_double_list_pop(self):
        def f(l):
            s = l.pop().pop()
            return s, l

        tr = self.transform(f, lists, operator_overload=OperatorList())
        test_input = [1, 2, [1, 2, 3]]
        # TODO(mdan): Pass a list of lists of tensor when we fully support that.
        # For now, we just pass a regular Python list of lists just to verify that
        # the two pop calls are sequenced properly.
        s, tl = tr(test_input)
        self.assertIsInstance(tl, list)
        self.assertEqual(s, 3)

    # TODO(klecki): Revert the stack test
    # def test_list_stack(self):
    #   def f():
    #     l = [1, 2, 3]
    #     return array_ops.stack(l)
    #   tr = self.transform(f, lists, operator_overload=OperatorList())
    #   self.assertAllEqual(self.evaluate(tr()), [1, 2, 3])
|
DALI-main
|
dali/test/python/autograph/converters/test_lists.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conditional_expressions module."""
from nvidia.dali._autograph.converters import conditional_expressions
from nvidia.dali._autograph.core import converter_testing
class ConditionalExpressionsTest(converter_testing.TestCase):
    """Tests conversion of ternary conditional expressions."""

    def assertTransformedEquivalent(self, f, *inputs):
        # Converts f and asserts identical results for the given inputs.
        tr = self.transform(f, conditional_expressions)
        self.assertEqual(f(*inputs), tr(*inputs))

    def test_basic(self):
        def f(x):
            return 1 if x else 0

        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 3)

    def test_nested_orelse(self):
        # The orelse branch itself contains a conditional expression.
        def f(x):
            y = x * x if x > 0 else x if x else 1
            return y

        self.assertTransformedEquivalent(f, -2)
        self.assertTransformedEquivalent(f, 0)
        self.assertTransformedEquivalent(f, 2)
|
DALI-main
|
dali/test/python/autograph/converters/test_conditional_expressions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functions module."""
from nvidia.dali._autograph.converters import functions
from nvidia.dali._autograph.converters import return_statements
from nvidia.dali._autograph.core import ag_ctx
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.core import converter_testing
from nvidia.dali._autograph.impl import api
CONSTANT = 1
class FunctionTransformer(converter_testing.TestCase):
    """Tests the functions converter (body wrapping, docstring preservation)."""

    def test_basic(self):
        def f(l):
            """Docstring."""
            a = 1
            l += a
            return l

        tr = self.transform(f, functions)
        result = tr(CONSTANT)
        self.assertEqual(2, result)
        # The converter must preserve the original docstring.
        self.assertEqual('Docstring.', tr.__doc__)

    def test_multiline_docstring(self):
        def f():
            """First sentence.
            Second sentence.
            Returns:
            Something.
            """
            return CONSTANT

        tr = self.transform(f, functions)
        result = tr()
        self.assertEqual(CONSTANT, result)
        self.assertIn('First sentence.', tr.__doc__)
        self.assertIn('Second sentence.', tr.__doc__)

    def test_nested_functions(self):
        def f(l):
            def inner_fn(i):
                return i + 1

            l += 1
            return l, inner_fn(l)

        tr = self.transform(f, (functions, return_statements))
        first, second = tr(CONSTANT)
        # Bug fix: the results were previously computed but never checked.
        # CONSTANT (1) + 1 == 2, inner_fn(2) == 3.
        self.assertEqual((2, 3), (first, second))

    def test_conversion_context_preserves_in_inner_functions(self):
        def inner_fn_callee():
            # The DISABLED status set below must propagate into inner calls.
            self.assertEqual(
                ag_ctx.control_status_ctx().status, ag_ctx.Status.DISABLED)

        def f():
            def inner_fn():
                inner_fn_callee()

            with ag_ctx.ControlStatusCtx(
                    ag_ctx.Status.DISABLED, converter.ConversionOptions(recursive=True)):
                inner_fn()

        tr = self.transform(f, functions)
        tr()

    def test_method(self):
        class TestClass(object):
            def f(self, l):
                def inner_fn(i):
                    return i + 1

                l += 1
                return l, inner_fn(l)

        tr = self.transform(TestClass.f, (functions, return_statements))
        first, second = tr(TestClass(), CONSTANT)
        # Bug fix: the results were previously computed but never checked.
        self.assertEqual((2, 3), (first, second))

    def test_lambda_in_return_value(self):
        def f():
            return lambda x: x + 1

        tr = self.transform(f, functions)
        result_l = tr()
        # Returned lambdas must be marked as autograph artifacts.
        self.assertTrue(api.is_autograph_artifact(result_l))
|
DALI-main
|
dali/test/python/autograph/converters/test_functions.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for control_flow module."""
import collections
from nvidia.dali._autograph.converters import break_statements
from nvidia.dali._autograph.converters import continue_statements
from nvidia.dali._autograph.converters import control_flow
from nvidia.dali._autograph.core import converter_testing
from nvidia.dali._autograph.utils.all_utils import custom_constant
for_unaffected_global = None
for_mixed_globals_nonglobals = None
for_test_global_local = None
class ControlFlowTestBase(converter_testing.TestCase):
    """Shared helpers for the control_flow converter tests."""

    def assertValuesEqual(self, actual, expected):
        # Single comparison hook so subclasses can override value semantics.
        self.assertEqual(actual, expected)

    def assertTransformedResult(self, f, inputs, expected):
        """Converts f, calls it with inputs, and compares against expected."""
        args = inputs if isinstance(inputs, tuple) else (inputs,)
        converted = self.transform(f, control_flow)
        self.assertValuesEqual(converted(*args), expected)
class NestedControlFlowTest(ControlFlowTestBase):
    """Tests conversion of loops nested inside other control flow."""

    def test_basic(self):
        def f(n):
            i = 0
            j = 0
            s = 0
            while i < n:
                while j < i:
                    j += 3
                u = i + j  # 'u' is not defined within the inner loop
                s += u
                i += 1
                j = 0
            return s, i, j, n

        self.assertTransformedResult(f, custom_constant(5),
                                     (25, 5, 0, 5))

    def test_mixed_globals_nonglobals(self):
        def f(n):
            # Accumulates into a module-level global rather than a local.
            global for_mixed_globals_nonglobals
            i = 0
            j = 0
            for_mixed_globals_nonglobals = 0
            while i < n:
                while j < i:
                    j += 3
                u = i + j  # 'u' is not defined within the inner loop
                for_mixed_globals_nonglobals += u
                i += 1
                j = 0
            return for_mixed_globals_nonglobals, i, j, n

        self.assertTransformedResult(f, custom_constant(5),
                                     (25, 5, 0, 5))

    def test_composite_state_complex(self):
        # Loop state is reached through a deep attribute/subscript chain.
        class TestClassX(object):
            def __init__(self, x):
                self.x = x

        class TestClassY(object):
            def __init__(self, y):
                self.y = y

        def f(n):
            tc = TestClassX(TestClassY({'z': TestClassX(n)}))
            if n > 0:
                while n > 0:
                    if n < 2:
                        tc.x.y['z'].x += 1
                    n -= 1
            return n, tc

        tr = self.transform(f, control_flow)
        n, tc = tr(custom_constant(5))
        self.assertValuesEqual((n, tc.x.y['z'].x), (0, 6))
class WhileStatementTest(ControlFlowTestBase):
    """Tests conversion of while loops, including composite loop state."""

    def test_basic(self):
        def f(n):
            i = 0
            s = 0
            while i < n:
                s += i
                i += 1
            return s, i, n

        self.assertTransformedResult(f, custom_constant(5), (10, 5, 5))

    def test_single_output(self):
        def f(n):
            while n > 0:
                n -= 1
            return n

        self.assertTransformedResult(f, custom_constant(5), 0)

    def test_composite_state_attr(self):
        # Loop state held in an object attribute.
        class TestClass(object):
            def __init__(self):
                self.x = custom_constant(3)

        def f(n):
            tc = TestClass()
            while n > 0:
                tc.x += 1
                n -= 1
            return n

        self.assertTransformedResult(f, custom_constant(5), 0)

    def test_composite_state_slice(self):
        # Loop state held under a dynamic dictionary key.
        def f(n):
            d = {'a': n}
            k = 'a'
            while n > 0:
                d[k] += 1
                n -= 1
            return d[k], n

        self.assertTransformedResult(f, custom_constant(5), (10, 0))

    def test_composite_state_literal_slice(self):
        def f(n):
            d = {'a': n}
            while n > 0:
                d['a'] += 1
                n -= 1
            return d['a'], n

        self.assertTransformedResult(f, custom_constant(5), (10, 0))

    def test_local_composite_attr(self):
        class TestClass(object):
            def __init__(self):
                self.x = custom_constant(3)

        def f(n):
            while n > 0:
                tc = TestClass()
                tc.x = tc.x
                n -= 1
            return n

        self.assertTransformedResult(f, custom_constant(5), 0)

    def test_local_composite_slice(self):
        def f(n):
            while n > 0:
                d = {'x': n}
                k = 'x'
                d[k] = d[k]
                n -= 1
            return n

        self.assertTransformedResult(f, custom_constant(5), 0)

    def test_local_composite_literal_slice(self):
        def f(n):
            while n > 0:
                d = {'x': n}
                d['x'] = d['x']
                n -= 1
            return n

        self.assertTransformedResult(f, custom_constant(5), 0)

    def test_non_tensor_state(self):
        # This class is ok to be in a tf.while's state.
        class TestClass(collections.namedtuple('TestClass', ('x'))):
            pass

        def f(n):
            tc = TestClass([custom_constant(0)])
            while n > 0:
                tc = TestClass([custom_constant(3)])
                tc.x[0] = tc.x[0] + 1
                n -= 1
            return tc.x[0]

        self.assertTransformedResult(f, custom_constant(5), 4)
class IfStatementTest(ControlFlowTestBase):
    """Tests conversion of if statements, incl. composite and global state."""

    def test_basic(self):
        def f(n):
            a = 0
            b = 0
            if n > 0:
                a = -n
            else:
                b = 2 * n
            return a, b

        self.assertTransformedResult(f, custom_constant(1), (-1, 0))
        self.assertTransformedResult(f, custom_constant(-1), (0, -2))

    def test_complex_outputs(self):
        class TestClass(object):
            def __init__(self, a, b):
                self.a = a
                self.b = b

        def f(n, obj):
            obj.a = 0
            obj.b = 0
            if n > 0:
                obj.a = -n
            else:
                obj.b = 2 * n
            return obj

        tr = self.transform(f, control_flow)
        res_obj = tr(custom_constant(1), TestClass(0, 0))
        self.assertValuesEqual((res_obj.a, res_obj.b), (-1, 0))
        res_obj = tr(custom_constant(-1), TestClass(0, 0))
        self.assertValuesEqual((res_obj.a, res_obj.b), (0, -2))

    def test_single_output(self):
        def f(n):
            if n > 0:
                n = -n
            return n

        self.assertTransformedResult(f, custom_constant(1), -1)

    def test_unbalanced(self):
        # Only one branch assigns; the other must fall through unchanged.
        def f(n):
            if n > 0:
                n = 3
            return n

        self.assertTransformedResult(f, custom_constant(2), 3)
        self.assertTransformedResult(f, custom_constant(-3), -3)

    def test_unbalanced_raising(self):
        def f(n):
            if n > 0:
                n = n + 1
                raise ValueError()
            return n

        self.assertTransformedResult(f, -3, -3)
        tr = self.transform(f, control_flow)
        with self.assertRaises(ValueError):
            tr(1)

    def test_local_var(self):
        def f(n):
            if n > 0:
                b = 4
                n = b + 1
            return n

        self.assertTransformedResult(f, custom_constant(1), 5)
        self.assertTransformedResult(f, custom_constant(-1), -1)

    def test_local_remains_local(self):
        def f(n):
            if n > 0:
                b = 4
                n = b + 1
            return n

        self.assertTransformedResult(f, custom_constant(1), 5)
        self.assertTransformedResult(f, custom_constant(-1), -1)

    def test_global_local(self):
        def f(n):
            if n > 0:
                global for_test_global_local
                if for_test_global_local is None:
                    for_test_global_local = 1
                else:
                    for_test_global_local += 1
                n += for_test_global_local
            return n

        tr = self.transform(f, control_flow)
        # NOTE(review): relies on the module-level global still being None;
        # this test is order-sensitive w.r.t. other mutations of it.
        assert for_test_global_local is None
        self.assertEqual(tr(1), 2)
        self.assertEqual(for_test_global_local, 1)

    def test_no_outputs(self):
        def f(n):
            if n > 0:
                b = 4  # pylint:disable=unused-variable # noqa: F841
            return n

        self.assertTransformedResult(f, custom_constant(1), 1)
        self.assertTransformedResult(f, custom_constant(-1), -1)

    def test_created_outputs(self):
        # 'result' is created inside both branches of the conditional.
        def f(i):
            if i == 0:
                result = i - 1
            else:
                result = i + 1
            return result

        self.assertTransformedResult(f, 0, -1)
        self.assertTransformedResult(f, 1, 2)

    def test_created_loop_local_outputs(self):
        def f(n, x):
            for i in n:
                if i == 0:
                    result = i - 1
                else:
                    result = i + 1
                if result > 0:
                    x += 1
            return x

        self.assertTransformedResult(f, (range(5), 10), 14)

    def test_created_loop_variable(self):
        def f(n, x):
            for i in n:
                if i == 0:
                    result = i - 1
                if i > 0:  # Using the result from previous iteration.
                    if result < 0:
                        x += 1
            return x

        self.assertTransformedResult(f, (range(5), 10), 14)

    def test_unaffected_global(self):
        global for_unaffected_global
        for_unaffected_global = 3

        def f(i):
            global for_unaffected_global
            if i == 0:
                for_unaffected_global = i - 1
            return for_unaffected_global

        self.assertTransformedResult(f, 1, 3)
        self.assertTransformedResult(f, 0, -1)
        self.assertEqual(for_unaffected_global, -1)

    def test_unaffected_nonlocal(self):
        def f(i):
            def inner_fn():
                nonlocal n
                if i == 0:
                    n = i - 1

            n = 3
            inner_fn()
            return n

        self.assertTransformedResult(f, 1, 3)
        self.assertTransformedResult(f, 0, -1)

    def test_output_defined_in_prior_except(self):
        def f(i):
            try:
                raise ValueError()
            except ValueError:
                x = 1
            if i == 0:
                x = i - 1
            return x

        self.assertTransformedResult(f, 1, 1)
        self.assertTransformedResult(f, 0, -1)

    def test_unbalanced_multiple_composites(self):
        class Foo(object):
            def __init__(self):
                self.b = 2
                self.c = 3

        def f(x, condition):
            z = 5
            if condition:
                x.b = 7
                x.c = 11
                z = 13
            return x.b, x.c, z

        self.assertTransformedResult(f, (Foo(), custom_constant(True)),
                                     (7, 11, 13))
        self.assertTransformedResult(f, (Foo(), custom_constant(False)),
                                     (2, 3, 5))

    def test_unbalanced_composite(self):
        class Foo(object):
            def __init__(self):
                self.b = 2

        def f(x, condition):
            z = 5
            if condition:
                x.b = 7
                z = 13
            return x.b, z

        self.assertTransformedResult(f, (Foo(), custom_constant(True)),
                                     (7, 13))
        self.assertTransformedResult(f, (Foo(), custom_constant(False)),
                                     (2, 5))
class ForStatementTest(ControlFlowTestBase):
    """Tests conversion of for loops."""

    def test_basic(self):
        def f(l):
            s1 = 0
            s2 = 0
            for e in l:
                s1 += e
                s2 += e * e
            return s1, s2

        self.assertTransformedResult(f, custom_constant([1, 3]), (4, 10))
        empty_vector = custom_constant([], shape=(0,), dtype=int)
        self.assertTransformedResult(f, empty_vector, (0, 0))

    def test_single_output(self):
        def f(l):
            s = 0
            for e in l:
                s += e
            return s

        self.assertTransformedResult(f, custom_constant([1, 3]), 4)
        empty_vector = custom_constant([], shape=(0,), dtype=int)
        self.assertTransformedResult(f, empty_vector, 0)

    def test_iterated_expression(self):
        eval_count = [0]

        def count_evals(x):
            eval_count[0] += 1
            return x

        def f(n):
            s = 0
            for e in count_evals(range(n)):
                s += e
            return s

        tr = self.transform(f, control_flow)
        self.assertEqual(tr(5), 10)
        # The iterated expression must be evaluated exactly once.
        self.assertEqual(eval_count[0], 1)

    def test_tuple_unpacking(self):
        def f(x_list):
            z = custom_constant(0)  # pylint:disable=undefined-variable # noqa: F821
            for i, x in enumerate(x_list):
                z = z + x + i
            return z

        self.assertTransformedResult(f, [3, 3], 7)

    def test_with_comprehension_in_body(self):
        def f(l, n):
            s = custom_constant(list(range(n)))
            for _ in l:
                s += custom_constant([a for a in range(n)])
            return s

        self.assertTransformedResult(f, (custom_constant([1, 2, 3]), 5),
                                     list(range(5)) * 4)
class AdvancedControlFlowTest(ControlFlowTestBase):
    """Tests control flow combined with break/continue lowering."""

    def assertTransformedEquivalent(self, f, *inputs):
        # Applies the full lowering chain and compares with the original.
        tr = self.transform(
            f, (break_statements, continue_statements, control_flow))
        self.assertEqual(f(*inputs), tr(*inputs))

    def test_while_with_else(self):
        def f(x):
            while x > 2:
                x /= 2
            else:
                x += 1
            return x

        self.assertTransformedEquivalent(f, 4)
        self.assertTransformedEquivalent(f, 2)

    def test_while_with_else_and_break(self):
        # The else clause must be skipped when the loop exits via break.
        def f(cond1):
            x = 8
            while x > 2:
                x /= 2
                if cond1:
                    break
            else:
                x += 1
            return x

        self.assertTransformedEquivalent(f, True)
        self.assertTransformedEquivalent(f, False)

    def test_for_with_else(self):
        def f(l):
            res = 0
            for x in l:
                res += x
            else:
                res += 1
            return res

        self.assertTransformedEquivalent(f, [])
        self.assertTransformedEquivalent(f, [1, 2])

    def test_for_with_else_and_break(self):
        def f(flag):
            l = [1, 2, 3]
            res = 0
            for x in l:
                res += x
                if flag:
                    break
            else:
                res += 1
            return res

        self.assertTransformedEquivalent(f, True)
        self.assertTransformedEquivalent(f, False)
|
DALI-main
|
dali/test/python/autograph/converters/test_control_flow.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for call_trees module."""
import imp
from nvidia.dali._autograph.converters import call_trees
from nvidia.dali._autograph.converters import functions
from nvidia.dali._autograph.core import converter_testing
class MockConvertedCall(object):
    """Stand-in for api.converted_call that records every invocation.

    Each invocation is logged as an (args, kwargs) pair in `calls` (kwargs
    may be None), then the target is executed directly and its result
    returned.
    """

    def __init__(self):
        # Chronological record of (args, kwargs) tuples.
        self.calls = []

    def __call__(self, f, args, kwargs, caller_fn_scope=None, options=None):
        del caller_fn_scope, options  # Unused; accepted for API compatibility.
        self.calls.append((args, kwargs))
        effective_kwargs = kwargs if kwargs else {}
        return f(*args, **effective_kwargs)
class CallTreesTest(converter_testing.TestCase):
    """Tests rewriting of function calls through converted_call."""

    def _transform_with_mock(self, f):
        # Replaces api.converted_call with a recording mock so each test can
        # verify exactly how positional and keyword arguments were routed.
        mock = MockConvertedCall()
        tr = self.transform(
            f, (functions, call_trees),
            ag_overrides={'converted_call': mock})
        return tr, mock

    def test_function_no_args(self):
        def f(f):
            return f() + 20

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(tr(lambda: 1), 21)
        self.assertListEqual(mock.calls, [((), None)])

    def test_function_with_expression_in_argument(self):
        def f(f, g):
            return f(g() + 20) + 4000

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(tr(lambda x: x + 300, lambda: 1), 4321)
        self.assertListEqual(mock.calls, [
            ((), None),
            ((21,), None),
        ])

    def test_function_with_call_in_argument(self):
        def f(f, g):
            return f(g()) + 300

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(tr(lambda x: x + 20, lambda: 1), 321)
        self.assertListEqual(mock.calls, [
            ((), None),
            ((1,), None),
        ])

    def test_function_chaining(self):
        def get_one():
            return 1

        def f():
            return get_one().__add__(20)

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(tr(), 21)
        self.assertListEqual(mock.calls, [
            ((), None),
            ((20,), None),
        ])

    def test_function_with_single_arg(self):
        def f(f, a):
            return f(a) + 20

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(tr(lambda a: a, 1), 21)
        self.assertListEqual(mock.calls, [((1,), None)])

    def test_function_with_args_only(self):
        def f(f, a, b):
            return f(a, b) + 300

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(tr(lambda a, b: a + b, 1, 20), 321)
        self.assertListEqual(mock.calls, [((1, 20), None)])

    def test_function_with_kwarg(self):
        def f(f, a, b):
            return f(a, c=b) + 300

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(tr(lambda a, c: a + c, 1, 20), 321)
        self.assertListEqual(mock.calls, [((1,), {'c': 20})])

    def test_function_with_kwargs_starargs(self):
        def f(f, a, *args, **kwargs):
            return f(a, *args, **kwargs) + 5

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(
            tr(lambda *args, **kwargs: 7, 1, *[2, 3], **{
                'b': 4,
                'c': 5
            }), 12)
        self.assertListEqual(mock.calls, [((1, 2, 3), {'b': 4, 'c': 5})])

    def test_function_with_starargs_only(self):
        def g(*args):
            return sum(args)

        def f():
            args = [1, 20, 300]
            return g(*args) + 4000

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(tr(), 4321)
        self.assertListEqual(mock.calls, [((1, 20, 300), None)])

    def test_function_with_starargs_mixed(self):
        # Star-unpacking interleaved with positional arguments.
        def g(a, b, c, d):
            return a * 1000 + b * 100 + c * 10 + d

        def f():
            args1 = (1,)
            args2 = [3]
            return g(*args1, 2, *args2, 4)

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(tr(), 1234)
        self.assertListEqual(mock.calls, [((1, 2, 3, 4), None)])

    def test_function_with_kwargs_keywords(self):
        def f(f, a, b, **kwargs):
            return f(a, b=b, **kwargs) + 5

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(
            tr(lambda *args, **kwargs: 7, 1, 2, **{'c': 3}), 12)
        self.assertListEqual(mock.calls, [((1,), {'b': 2, 'c': 3})])

    def test_function_with_multiple_kwargs(self):
        def f(f, a, b, c, kwargs1, kwargs2):
            return f(a, b=b, **kwargs1, c=c, **kwargs2) + 5

        tr, mock = self._transform_with_mock(f)
        self.assertEqual(
            tr(lambda *args, **kwargs: 7, 1, 2, 3, {'d': 4}, {'e': 5}), 12)
        self.assertListEqual(mock.calls, [((1,), {
            'b': 2,
            'c': 3,
            'd': 4,
            'e': 5
        })])

    def test_function_with_call_in_lambda_argument(self):
        def h(l, a):
            return l(a) + 4000

        def g(a, *args):
            return a + sum(args)

        def f(h, g, a, *args):
            return h(lambda x: g(x, *args), a)

        tr, _ = self._transform_with_mock(f)
        self.assertEqual(tr(h, g, 1, *(20, 300)), 4321)

    def test_debugger_set_trace(self):
        # NOTE(review): uses the long-deprecated `imp` module to fabricate a
        # fake pdb module; consider types.ModuleType on modern Pythons.
        tracking_list = []
        pdb = imp.new_module('fake_pdb')
        pdb.set_trace = lambda: tracking_list.append(1)

        def f():
            return pdb.set_trace()

        tr, _ = self._transform_with_mock(f)
        tr()
        self.assertListEqual(tracking_list, [1])

    def test_class_method(self):
        class TestClass(object):
            def other_method(self, x):
                return x + 20

            def test_method(self, a):
                return self.other_method(a) + 300

        tc = TestClass()
        tr, mock = self._transform_with_mock(TestClass.test_method)
        self.assertEqual(321, tr(tc, 1))
        self.assertListEqual(mock.calls, [((1,), None)])

    def test_object_method(self):
        class TestClass(object):
            def other_method(self, x):
                return x + 20

            def test_method(self, a):
                return self.other_method(a) + 300

        tc = TestClass()
        tr, mock = self._transform_with_mock(tc.test_method)
        self.assertEqual(321, tr(tc, 1))
        self.assertListEqual(mock.calls, [((1,), None)])
|
DALI-main
|
dali/test/python/autograph/converters/test_call_trees.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for return_statements module."""
from nvidia.dali._autograph.converters import functions
from nvidia.dali._autograph.converters import return_statements
from nvidia.dali._autograph.core import converter_testing
class SingleReturnTest(converter_testing.TestCase):
def assertTransformedEquivalent(self, f, *inputs):
tr = self.transform(f, (functions, return_statements))
self.assertEqual(f(*inputs), tr(*inputs))
def test_straightline(self):
def f(x):
return x * x
self.assertTransformedEquivalent(f, 2)
def test_superfluous_returns(self):
def f():
retval = 1
return retval
retval = 2 # pylint:disable=unreachable
return retval
self.assertTransformedEquivalent(f)
def test_superfluous_returns_adjacent(self):
def f():
return 1
return 2 # pylint:disable=unreachable
self.assertTransformedEquivalent(f)
def test_conditional(self):
def f(x):
if x > 0:
return x
else:
return x * x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_conditional_missing_else(self):
def f(x):
if x > 0:
return x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_conditional_missing_else_then_default(self):
def f(x):
if x > 0:
return x
return x * x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_conditional_else_only_then_default(self):
def f(x):
if x < 0:
x *= x
else:
return x
return x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_conditional_nested(self):
def f(x):
if x > 0:
if x < 5:
return x
else:
return x * x
else:
return x * x * x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
self.assertTransformedEquivalent(f, 5)
def test_context_manager(self):
def f(x):
return x * x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_context_manager_in_conditional(self):
def f(x):
if x > 0:
return x * x
else:
return x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def text_conditional_in_context_manager(self):
def f(x):
if x > 0:
return x * x
else:
return x
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_no_return(self):
def f(x):
x *= x
self.assertTransformedEquivalent(f, 2)
def test_nested_function(self):
def f(x):
def inner_fn(y):
if y > 0:
return y * y
else:
return y
return inner_fn(x)
self.assertTransformedEquivalent(f, 2)
self.assertTransformedEquivalent(f, -2)
def test_nested_function_in_control_flow(self):
    # Function defined (and called) inside a conditional branch.
    def f(x):
        if x:
            def inner_fn(y):
                return y

            inner_fn(x)

    self.assertTransformedEquivalent(f, 2)
    self.assertTransformedEquivalent(f, -2)
def test_for_loop(self):
    # Return from inside a for loop; n == 0 falls through (None).
    def f(n):
        for _ in range(n):
            return 1

    self.assertTransformedEquivalent(f, 2)
    self.assertTransformedEquivalent(f, 0)
def test_while_loop(self):
    # Conditional return from inside a while loop, plus a default return.
    def f(n):
        i = 0
        s = 0
        while i < n:
            i += 1
            s += i
            if s > 4:
                return s
        return -1

    self.assertTransformedEquivalent(f, 0)
    self.assertTransformedEquivalent(f, 2)
    self.assertTransformedEquivalent(f, 4)
def test_null_return(self):
    # Bare ``return`` statements (returning None) on both paths.
    def f(n):
        if n > 4:
            return
        return

    self.assertTransformedEquivalent(f, 4)
    self.assertTransformedEquivalent(f, 5)
def test_nested_multiple_withs(self):
    # NOTE(review): the name refers to ``with`` blocks, but none remain in
    # this copy — presumably stripped during the DALI port; confirm.
    def f(x):
        v = []
        while x > 0:
            x -= 1
            if x % 2 == 0:
                return v
            v.append(x)
            v.append(x)
        return v

    self.assertTransformedEquivalent(f, 0)
    self.assertTransformedEquivalent(f, 1)
    self.assertTransformedEquivalent(f, 3)
    self.assertTransformedEquivalent(f, 4)
def test_multiple_returns_in_nested_scope(self):
    # Returns from a loop body and from an except handler.
    # NOTE(review): the pylint disable mentions bare-except although the
    # handler names ValueError — comment is stale, behavior unchanged.
    def f(a):
        v = []
        for x in a:
            x -= 1
            if x > 100:
                return v
            try:
                raise ValueError('intentional')
            except ValueError:  # pylint:disable=bare-except # noqa: E722
                return v
            v.append(x)
        return v

    self.assertTransformedEquivalent(f, [])
    self.assertTransformedEquivalent(f, [1])
    self.assertTransformedEquivalent(f, [2])
    self.assertTransformedEquivalent(f, [1, 2, 3])
|
DALI-main
|
dali/test/python/autograph/converters/test_return_statements.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variables module."""
from nvidia.dali._autograph.converters import variables
from nvidia.dali._autograph.core import converter_testing
class VariablesTest(converter_testing.TestCase):
    """Tests for the ``variables`` converter.

    The converter rewrites variable reads through an overridable ``ld``
    hook; ``_transform_with_test_ld`` overrides it to add 1 to every read
    so tests can count how many reads were rewritten.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    all tokens kept identical to the original.
    """

    def _transform_with_test_ld(self, f):
        """Generates code which adds 1 to all variable reads."""
        return self.transform(f, variables, ag_overrides={'ld': lambda x: x + 1})

    def test_read(self):
        # A single read is rewritten exactly once.
        def f(l):
            return l

        tr = self._transform_with_test_ld(f)
        self.assertEqual(tr(1), 2)

    def test_aug_assign(self):
        # Augmented assignment reads the target, then the result is read
        # again by the return.
        def f(l):
            l *= 10
            return l

        tr = self._transform_with_test_ld(f)
        self.assertEqual(tr(1), (1 + 1) * 10 + 1)  # two reads

    def test_del(self):
        # ``del`` of a plain name is rewritten to raise a friendly error
        # on subsequent use.
        def f(l):
            del l
            return l  # noqa: F821

        tr = self.transform(f, variables)
        with self.assertRaisesRegex(NameError, "'l' is used before assignment"):
            tr(1)

    def test_del_getitem_ignored_basic_slice(self):
        # ``del`` of a subscript is left untouched.
        def f(l):
            del l[0]
            return l

        tr = self.transform(f, variables)
        self.assertListEqual([2], tr([1, 2]))

    def test_del_getitem_ignored_range_slice(self):
        # ``del`` of a range slice is left untouched.
        def f(l):
            del l[0:2]
            return l

        tr = self.transform(f, variables)
        self.assertListEqual([], tr([1, 2]))

    def test_del_getattr_ignored(self):
        # ``del`` of an attribute is left untouched.
        def f(l):
            del l.a
            return l

        class TestClass(object):

            def __init__(self):
                self.a = 1
                self.b = 2

        tr = self.transform(f, variables)
        self.assertFalse(hasattr(tr(TestClass()), 'a'))
        self.assertEqual(tr(TestClass()).b, 2)

    def test_del_packing_ignored_list(self):
        # Note: testing for UnboundLocalError, not NameError because in this case we
        # don't rewrite the del.
        def f(a, b):
            del [a, b]
            return a  # noqa: F821

        tr = self.transform(f, variables)
        with self.assertRaises(UnboundLocalError):
            tr(1, 2)

    def test_del_packing_ignored_nested(self):
        # Note: testing for UnboundLocalError, not NameError because in this case we
        # don't rewrite the del.
        def f(a, b, c):
            del [a, (b, c)]
            return c  # noqa: F821

        tr = self.transform(f, variables)
        with self.assertRaises(UnboundLocalError):
            tr(1, 2, 3)

    def test_del_item_multiple_mixed_used_after(self):
        # Mixed name/subscript del; a name that is read after the del
        # triggers the rewritten error.
        def f(a, b, c):
            del a, b, c[0]
            a = 1
            return a, b, c  # noqa: F821

        tr = self.transform(f, variables)
        with self.assertRaisesRegex(NameError, "'b' is used before assignment"):
            tr(1, 2, [1, 2])

    def test_del_item_multiple_mixed_unused_after(self):
        # Same mix, but the deleted names are reassigned before use.
        def f(a, b, c):
            del a, b, c[0]
            a = 1  # noqa: F841
            b = 2  # noqa: F841
            return c

        tr = self.transform(f, variables)
        self.assertListEqual([2], tr(1, 2, [1, 2]))

    def test_attribute(self):
        # The ``ld`` override (add 1) applies to the object read, which
        # mutates it via __add__ before .v is accessed.
        class TestClass(object):

            def __init__(self):
                self.v = 1

            def __add__(self, other):
                self.v += other
                return self

        def f(l):
            return l.v

        tc = TestClass()
        tr = self._transform_with_test_ld(f)
        self.assertEqual(tr(tc), 2)

    def test_subscript(self):
        # As above, for subscript reads.
        class TestClass(object):

            def __init__(self):
                self.v = 1

            def __add__(self, other):
                self.v += other
                return self

            def __getitem__(self, _):
                return self.v

        def f(l):
            return l[0]

        tc = TestClass()
        tr = self._transform_with_test_ld(f)
        self.assertEqual(tr(tc), 2)

    def test_call(self):
        # As above, for call expressions.
        class TestClass(object):

            def __init__(self):
                self.v = 1

            def __add__(self, other):
                self.v += other
                return self

            def __call__(self):
                return self.v

        def f(l):
            return l()

        tc = TestClass()
        tr = self._transform_with_test_ld(f)
        self.assertEqual(tr(tc), 2)
|
DALI-main
|
dali/test/python/autograph/converters/test_variables.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for asserts module."""
from nvidia.dali._autograph.converters import asserts
from nvidia.dali._autograph.converters import functions
from nvidia.dali._autograph.converters import return_statements
from nvidia.dali._autograph.core import converter_testing
class AssertsTest(converter_testing.TestCase):
    """Tests for the ``asserts`` converter."""

    def test_basic(self):
        # The assert message must survive the functions -> asserts ->
        # return_statements conversion pipeline.
        def f(a):
            assert a, 'testmsg'
            return a

        tr = self.transform(f, (functions, asserts, return_statements))
        with self.assertRaisesRegex(AssertionError, 'testmsg'):
            _ = tr(False)
|
DALI-main
|
dali/test/python/autograph/converters/test_asserts.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for parser module."""
import re
import textwrap
import unittest
import gast
from nvidia.dali._autograph.pyct import ast_util
from nvidia.dali._autograph.pyct import errors
from nvidia.dali._autograph.pyct import gast_util
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import pretty_printer
class ParserTest(unittest.TestCase):
    """Tests for the pyct ``parser`` module.

    NOTE(review): this file's whitespace was reconstructed from a mangled
    dump. Several fixtures below are layout-sensitive — lambda source
    extraction and the ``dedent_block`` code strings — so the exact
    indentation (including the deliberate 1-space continuation lines)
    should be verified against the upstream TensorFlow copy of this test.
    """

    def assertAstMatches(self, actual_node, expected_node_src, expr=True):
        # Structural AST comparison; formatting differences are ignored.
        if expr:
            # Ensure multi-line expressions parse.
            expected_node = gast.parse('({})'.format(expected_node_src)).body[0]
            expected_node = expected_node.value
        else:
            expected_node = gast.parse(expected_node_src).body[0]
        msg = 'AST did not match expected:\n{}\nActual:\n{}'.format(
            pretty_printer.fmt(expected_node),
            pretty_printer.fmt(actual_node))
        self.assertTrue(ast_util.matches(actual_node, expected_node), msg)

    def test_parse_entity(self):
        def f(x):
            return x + 1

        node, _ = parser.parse_entity(f, future_features=())
        self.assertEqual('f', node.name)

    def test_parse_lambda(self):
        l = lambda x: x + 1

        expected_node_src = 'lambda x: (x + 1)'
        node, source = parser.parse_entity(l, future_features=())
        self.assertAstMatches(node, source)
        self.assertAstMatches(node, expected_node_src)

    def test_parse_lambda_prefix_cleanup(self):
        # The "lambda_" prefix in the variable name must not confuse the
        # lambda source extractor.
        lambda_lam = lambda x: x + 1

        expected_node_src = 'lambda x: (x + 1)'
        node, source = parser.parse_entity(lambda_lam, future_features=())
        self.assertAstMatches(node, source)
        self.assertAstMatches(node, expected_node_src)

    def test_parse_lambda_resolution_by_location(self):
        # Identical lambdas on different lines are disambiguated by their
        # source location.
        _ = lambda x: x + 1
        l = lambda x: x + 1
        _ = lambda x: x + 1

        expected_node_src = 'lambda x: (x + 1)'
        node, source = parser.parse_entity(l, future_features=())
        self.assertAstMatches(node, source)
        self.assertAstMatches(node, expected_node_src)
        self.assertEqual(source, 'lambda x: x + 1')

    def test_parse_lambda_resolution_by_signature(self):
        # Nested lambdas on the same line are disambiguated by signature.
        l = lambda x: lambda x, y: x + y

        node, source = parser.parse_entity(l, future_features=())
        expected_node_src = 'lambda x: (lambda x, y: (x + y))'
        self.assertAstMatches(node, source)
        self.assertAstMatches(node, expected_node_src)
        self.assertEqual(source, 'lambda x: lambda x, y: x + y')

        node, source = parser.parse_entity(l(0), future_features=())
        expected_node_src = 'lambda x, y: (x + y)'
        self.assertAstMatches(node, source)
        self.assertAstMatches(node, expected_node_src)
        self.assertEqual(source, 'lambda x, y: x + y')

    def test_parse_lambda_resolution_ambiguous(self):
        # Same line, same signature: resolution is impossible and must
        # raise with a message that shows the candidate definitions.
        l = lambda x: lambda x: 2 * x

        expected_exception_text = re.compile(r'found multiple definitions'
                                             r'.+'
                                             r'\(?lambda x: \(?lambda x'
                                             r'.+'
                                             r'\(?lambda x: \(?2', re.DOTALL)

        with self.assertRaisesRegex(
                errors.UnsupportedLanguageElementError,
                expected_exception_text):
            parser.parse_entity(l, future_features=())

        with self.assertRaisesRegex(
                errors.UnsupportedLanguageElementError,
                expected_exception_text):
            parser.parse_entity(l(0), future_features=())

    def assertMatchesWithPotentialGarbage(self, source, expected, garbage):
        # In runtimes which don't track end_col_number, the source contains the
        # entire line, which in turn may have garbage from the surrounding context.
        self.assertIn(source, (expected, expected + garbage))

    def test_parse_lambda_multiline(self):
        # NOTE: the 1-space continuation line below is deliberate — the
        # expected extracted source asserts it verbatim.
        l = (
            lambda x: lambda y: x + y # pylint:disable=g-long-lambda
 - 1)

        node, source = parser.parse_entity(l, future_features=())
        expected_node_src = 'lambda x: (lambda y: ((x + y) - 1))'
        self.assertAstMatches(node, expected_node_src)
        self.assertMatchesWithPotentialGarbage(
            source, ('lambda x: lambda y: x + y # pylint:disable=g-long-lambda\n'
                     ' - 1'), ')')

        node, source = parser.parse_entity(l(0), future_features=())
        expected_node_src = 'lambda y: ((x + y) - 1)'
        self.assertAstMatches(node, expected_node_src)
        self.assertMatchesWithPotentialGarbage(
            source, ('lambda y: x + y # pylint:disable=g-long-lambda\n'
                     ' - 1'), ')')

    def test_parse_lambda_in_expression(self):
        # Two similar lambdas in one expression; resolved individually.
        l = (
            lambda x: lambda y: x + y + 1,
            lambda x: lambda y: x + y + 2,
        )

        node, source = parser.parse_entity(l[0], future_features=())
        expected_node_src = 'lambda x: (lambda y: ((x + y) + 1))'
        self.assertAstMatches(node, expected_node_src)
        self.assertMatchesWithPotentialGarbage(
            source, 'lambda x: lambda y: x + y + 1', ',')

        node, source = parser.parse_entity(l[0](0), future_features=())
        expected_node_src = 'lambda y: ((x + y) + 1)'
        self.assertAstMatches(node, expected_node_src)
        self.assertMatchesWithPotentialGarbage(
            source, 'lambda y: x + y + 1', ',')

        node, source = parser.parse_entity(l[1], future_features=())
        expected_node_src = 'lambda x: (lambda y: ((x + y) + 2))'
        self.assertAstMatches(node, expected_node_src)
        self.assertMatchesWithPotentialGarbage(source,
                                               'lambda x: lambda y: x + y + 2', ',')

        node, source = parser.parse_entity(l[1](0), future_features=())
        expected_node_src = 'lambda y: ((x + y) + 2)'
        self.assertAstMatches(node, expected_node_src)
        self.assertMatchesWithPotentialGarbage(source, 'lambda y: x + y + 2', ',')

    def test_parse_lambda_complex_body(self):
        # NOTE: the 1-space continuation lines are deliberate — the
        # expected extracted source (base_source) asserts them verbatim.
        l = lambda x: ( # pylint:disable=g-long-lambda
 x.y(
 [],
 x.z,
 (),
 x[0:2],
 ),
 x.u,
 'abc',
 1,
 )

        node, source = parser.parse_entity(l, future_features=())
        expected_node_src = "lambda x: (x.y([], x.z, (), x[0:2]), x.u, 'abc', 1)"
        self.assertAstMatches(node, expected_node_src)

        base_source = ('lambda x: ( # pylint:disable=g-long-lambda\n'
                       ' x.y(\n'
                       ' [],\n'
                       ' x.z,\n'
                       ' (),\n'
                       ' x[0:2],\n'
                       ' ),\n'
                       ' x.u,\n'
                       ' \'abc\',\n'
                       ' 1,')
        # The complete source includes the trailing parenthesis. But that is only
        # detected in runtimes which correctly track end_lineno for ASTs.
        self.assertMatchesWithPotentialGarbage(source, base_source, '\n )')

    def test_parse_lambda_function_call_definition(self):

        def do_parse_and_test(lam, **unused_kwargs):
            node, source = parser.parse_entity(lam, future_features=())
            expected_node_src = 'lambda x: x'
            self.assertAstMatches(node, expected_node_src)
            self.assertMatchesWithPotentialGarbage(
                source, 'lambda x: x', ', named_arg=1)')

        do_parse_and_test( # Intentional line break
            lambda x: x, named_arg=1)

    def test_parse_entity_print_function(self):
        def f(x):
            print(x)

        node, _ = parser.parse_entity(f, future_features=('print_function',))
        self.assertEqual('f', node.name)

    def test_parse_comments(self):
        # The comment below is intentionally unindented; it must not break
        # source extraction.
        def f():
# unindented comment
            pass

        node, _ = parser.parse_entity(f, future_features=())
        self.assertEqual('f', node.name)

    def test_parse_multiline_strings(self):
        # The string content is intentionally unindented.
        def f():
            print("""
multiline
string""")

        node, _ = parser.parse_entity(f, future_features=())
        self.assertEqual('f', node.name)

    def _eval_code(self, code, name):
        """Execs ``code`` and returns the resulting global ``name``."""
        globs = {}
        exec(code, globs)  # pylint:disable=exec-used
        return globs[name]

    def test_dedent_block_basic(self):
        code = """
def f(x):
 if x > 0:
  return -x
 return x
"""
        f = self._eval_code(parser.dedent_block(code), 'f')
        self.assertEqual(f(1), -1)
        self.assertEqual(f(-1), -1)

    def test_dedent_block_comments_out_of_line(self):
        # Comments at arbitrary columns must not confuse dedent_block.
        code = """
###
def f(x):
 ###
 if x > 0:
  ###
  return -x
 ###
 ###
 return x
###
"""
        f = self._eval_code(parser.dedent_block(code), 'f')
        self.assertEqual(f(1), -1)
        self.assertEqual(f(-1), -1)

    def test_dedent_block_multiline_string(self):
        # Whitespace inside multiline strings must be preserved.
        code = """
def f():
 '''
 Docstring.
 '''
 return '''
 1
 2
 3'''
"""
        f = self._eval_code(parser.dedent_block(code), 'f')
        self.assertEqual(f.__doc__, '\n Docstring.\n ')
        self.assertEqual(f(), '\n 1\n 2\n 3')

    def test_dedent_block_multiline_expression(self):
        code = """
def f():
 return (1,
  2,
  3)
"""
        f = self._eval_code(parser.dedent_block(code), 'f')
        self.assertEqual(f(), (1, 2, 3))

    def test_dedent_block_continuation(self):
        # Backslash line continuations must survive dedenting.
        code = r"""
def f():
 a = \
  1
 return a
"""
        f = self._eval_code(parser.dedent_block(code), 'f')
        self.assertEqual(f(), 1)

    def test_dedent_block_continuation_in_string(self):
        # A continuation inside a string literal joins the lines.
        code = r"""
def f():
 a = "a \
b"
 return a
"""
        f = self._eval_code(parser.dedent_block(code), 'f')
        self.assertEqual(f(), 'a b')

    def test_parse_expression(self):
        node = parser.parse_expression('a.b')
        self.assertEqual('a', node.value.id)
        self.assertEqual('b', node.attr)

    def test_unparse(self):
        # Round trip a manually-built if/else through unparse.
        node = gast.If(
            test=gast.Constant(1, kind=None),
            body=[
                gast_util.compat_assign(
                    targets=[
                        gast.Name(
                            'a',
                            ctx=gast.Store(),
                            annotation=None,
                            type_comment=None)
                    ],
                    value=gast.Name(
                        'b', ctx=gast.Load(), annotation=None, type_comment=None),
                    type_comment=None)
            ],
            orelse=[
                gast_util.compat_assign(
                    targets=[
                        gast.Name(
                            'a',
                            ctx=gast.Store(),
                            annotation=None,
                            type_comment=None)
                    ],
                    value=gast.Constant('c', kind=None),
                    type_comment=None)
            ])

        source = parser.unparse(node, indentation=' ')
        self.assertEqual(
            textwrap.dedent("""
            # coding=utf-8
            if 1:
             a = b
            else:
             a = 'c'
            """).strip(), source.strip())

    def test_ext_slice_roundtrip(self):
        def ext_slice(n):
            return n[:, :], n[0, :], n[:, 0]

        node, _ = parser.parse_entity(ext_slice, future_features=())
        source = parser.unparse(node)
        self.assertAstMatches(node, source, expr=False)
|
DALI-main
|
dali/test/python/autograph/pyct/test_parser.py
|
# coding=utf-8
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for loader module."""
import inspect
import os
import textwrap
import unittest
import gast
from nvidia.dali._autograph.pyct import ast_util
from nvidia.dali._autograph.pyct import loader
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import pretty_printer
class LoaderTest(unittest.TestCase):
    """Tests for the pyct ``loader`` module."""

    def assertAstMatches(self, actual_node, expected_node_src):
        # Structural AST comparison; formatting differences are ignored.
        expected_node = gast.parse(expected_node_src).body[0]
        msg = 'AST did not match expected:\n{}\nActual:\n{}'.format(
            pretty_printer.fmt(expected_node),
            pretty_printer.fmt(actual_node))
        self.assertTrue(ast_util.matches(actual_node, expected_node), msg)

    def test_parse_load_identity(self):
        # parse -> load -> getsource must round-trip to an equivalent AST.
        def test_fn(x):
            a = True
            b = ''
            if a:
                b = (x + 1)
            return b

        node, _ = parser.parse_entity(test_fn, future_features=())
        module, _, _ = loader.load_ast(node)
        source = inspect.getsource(module.test_fn)
        expected_node_src = textwrap.dedent(inspect.getsource(test_fn))
        self.assertAstMatches(node, source)
        self.assertAstMatches(node, expected_node_src)

    def test_load_ast(self):
        # A manually-assembled FunctionDef for: def f(a): return (a + 1)
        node = gast.FunctionDef(
            name='f',
            args=gast.arguments(
                args=[
                    gast.Name(
                        'a', ctx=gast.Param(), annotation=None, type_comment=None)
                ],
                posonlyargs=[],
                vararg=None,
                kwonlyargs=[],
                kw_defaults=[],
                kwarg=None,
                defaults=[]),
            body=[
                gast.Return(
                    gast.BinOp(
                        op=gast.Add(),
                        left=gast.Name(
                            'a',
                            ctx=gast.Load(),
                            annotation=None,
                            type_comment=None),
                        right=gast.Constant(1, kind=None)))
            ],
            decorator_list=[],
            returns=None,
            type_comment=None)

        module, source, _ = loader.load_ast(node)

        expected_node_src = """
# coding=utf-8
def f(a):
    return (a + 1)
"""
        expected_node_src = textwrap.dedent(expected_node_src)

        self.assertAstMatches(node, source)
        self.assertAstMatches(node, expected_node_src)
        self.assertEqual(2, module.f(1))
        # The generated module is backed by a real file on disk.
        with open(module.__file__, 'r') as temp_output:
            self.assertAstMatches(node, temp_output.read())

    def test_load_source(self):
        # Unicode docstrings must survive the source round trip.
        test_source = textwrap.dedent(u"""
        # coding=utf-8
        def f(a):
            '日本語 Δθₜ ← Δθₜ₋₁ + ∇Q(sₜ, aₜ)(rₜ + γₜ₊₁ max Q(⋅))'
            return a + 1
        """)
        module, _ = loader.load_source(test_source, delete_on_exit=True)
        self.assertEqual(module.f(1), 2)
        self.assertEqual(
            module.f.__doc__, '日本語 Δθₜ ← Δθₜ₋₁ + ∇Q(sₜ, aₜ)(rₜ + γₜ₊₁ max Q(⋅))')

    def test_cleanup(self):
        test_source = textwrap.dedent('')
        _, filename = loader.load_source(test_source, delete_on_exit=True)
        # Clean up the file before loader.py tries to remove it, to check that the
        # latter can deal with that situation.
        os.unlink(filename)
|
DALI-main
|
dali/test/python/autograph/pyct/test_loader.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for qual_names module."""
import textwrap
import unittest
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct.qual_names import QN
from nvidia.dali._autograph.pyct.qual_names import resolve
class QNTest(unittest.TestCase):
    """Tests for the ``QN`` (qualified name) value type."""

    def test_from_str(self):
        a = QN('a')
        b = QN('b')
        a_dot_b = QN(a, attr='b')
        a_sub_b = QN(a, subscript=b)
        self.assertEqual(qual_names.from_str('a.b'), a_dot_b)
        self.assertEqual(qual_names.from_str('a'), a)
        self.assertEqual(qual_names.from_str('a[b]'), a_sub_b)

    def test_basic(self):
        # Simple names: not composite, no parent.
        a = QN('a')
        self.assertEqual(a.qn, ('a',))
        self.assertEqual(str(a), 'a')
        self.assertEqual(a.ssf(), 'a')
        self.assertEqual(a.ast().id, 'a')
        self.assertFalse(a.is_composite())
        with self.assertRaises(ValueError):
            _ = a.parent

        # Attribute access yields a composite QN with a parent.
        a_b = QN(a, attr='b')
        self.assertEqual(a_b.qn, (a, 'b'))
        self.assertEqual(str(a_b), 'a.b')
        self.assertEqual(a_b.ssf(), 'a_b')
        self.assertEqual(a_b.ast().value.id, 'a')
        self.assertEqual(a_b.ast().attr, 'b')
        self.assertTrue(a_b.is_composite())
        self.assertEqual(a_b.parent.qn, ('a',))

    def test_subscripts(self):
        a = QN('a')
        b = QN('b')
        a_sub_b = QN(a, subscript=b)
        self.assertEqual(a_sub_b.qn, (a, b))
        self.assertEqual(str(a_sub_b), 'a[b]')
        self.assertEqual(a_sub_b.ssf(), 'a_sub_b')
        self.assertEqual(a_sub_b.ast().value.id, 'a')
        self.assertEqual(a_sub_b.ast().slice.id, 'b')
        self.assertTrue(a_sub_b.is_composite())
        self.assertTrue(a_sub_b.has_subscript())
        self.assertEqual(a_sub_b.parent.qn, ('a',))

        # Nested subscripts: a[b[c]].
        c = QN('c')
        b_sub_c = QN(b, subscript=c)
        a_sub_b_sub_c = QN(a, subscript=b_sub_c)
        self.assertEqual(a_sub_b_sub_c.qn, (a, b_sub_c))
        self.assertTrue(a_sub_b_sub_c.is_composite())
        self.assertTrue(a_sub_b_sub_c.has_subscript())
        self.assertEqual(b_sub_c.qn, (b, c))
        self.assertEqual(str(a_sub_b_sub_c), 'a[b[c]]')
        self.assertEqual(a_sub_b_sub_c.ssf(), 'a_sub_b_sub_c')
        self.assertEqual(a_sub_b_sub_c.ast().value.id, 'a')
        self.assertEqual(a_sub_b_sub_c.ast().slice.value.id, 'b')
        self.assertEqual(a_sub_b_sub_c.ast().slice.slice.id, 'c')
        self.assertEqual(b_sub_c.ast().slice.id, 'c')
        self.assertEqual(a_sub_b_sub_c.parent.qn, ('a',))

        # attr and subscript are mutually exclusive constructor modes.
        with self.assertRaises(ValueError):
            QN('a', 'b')

    def test_equality(self):
        # QNs compare by value, not identity.
        a = QN('a')
        a2 = QN('a')
        a_b = QN(a, attr='b')
        self.assertEqual(a2.qn, ('a',))
        with self.assertRaises(ValueError):
            _ = a.parent

        a_b2 = QN(a, attr='b')
        self.assertEqual(a_b2.qn, (a, 'b'))
        self.assertEqual(a_b2.parent.qn, ('a',))

        self.assertTrue(a2 == a)
        self.assertFalse(a2 is a)

        self.assertTrue(a_b.parent == a)
        self.assertTrue(a_b2.parent == a)

        self.assertTrue(a_b2 == a_b)
        self.assertFalse(a_b2 is a_b)
        self.assertFalse(a_b2 == a)
        # Attribute and subscript forms with the same parts are distinct.
        a_sub_b = QN(a, subscript='b')
        a_sub_b2 = QN(a, subscript='b')
        self.assertTrue(a_sub_b == a_sub_b2)
        self.assertFalse(a_sub_b == a_b)

    def test_nested_attrs_subscripts(self):
        # The four combinations of attribute/subscript nesting are all
        # distinct QNs with distinct string forms.
        a = QN('a')
        b = QN('b')
        c = QN('c')
        b_sub_c = QN(b, subscript=c)
        a_sub_b_sub_c = QN(a, subscript=b_sub_c)

        b_dot_c = QN(b, attr='c')
        a_sub__b_dot_c = QN(a, subscript=b_dot_c)

        a_sub_b = QN(a, subscript=b)
        a_sub_b__dot_c = QN(a_sub_b, attr='c')

        a_dot_b = QN(a, attr='b')
        a_dot_b_sub_c = QN(a_dot_b, subscript=c)

        self.assertEqual(str(a_sub_b_sub_c), 'a[b[c]]')
        self.assertEqual(str(a_sub__b_dot_c), 'a[b.c]')
        self.assertEqual(str(a_sub_b__dot_c), 'a[b].c')
        self.assertEqual(str(a_dot_b_sub_c), 'a.b[c]')

        self.assertNotEqual(a_sub_b_sub_c, a_sub__b_dot_c)
        self.assertNotEqual(a_sub_b_sub_c, a_sub_b__dot_c)
        self.assertNotEqual(a_sub_b_sub_c, a_dot_b_sub_c)
        self.assertNotEqual(a_sub__b_dot_c, a_sub_b__dot_c)
        self.assertNotEqual(a_sub__b_dot_c, a_dot_b_sub_c)
        self.assertNotEqual(a_sub_b__dot_c, a_dot_b_sub_c)

    def test_hashable(self):
        # QNs are usable as dict keys.
        d = {QN('a'): 'a', QN('b'): 'b'}
        self.assertEqual(d[QN('a')], 'a')
        self.assertEqual(d[QN('b')], 'b')
        self.assertNotIn(QN('c'), d)

    def test_literals(self):
        # Literal subscripts ('b') are distinct from name subscripts (b).
        a = QN('a')
        a_sub_str_b = QN(a, subscript=QN(qual_names.Literal('b')))
        a_sub_b = QN(a, subscript=QN('b'))

        self.assertNotEqual(a_sub_str_b, a_sub_b)
        self.assertNotEqual(hash(a_sub_str_b), hash(a_sub_b))
        self.assertEqual(a_sub_str_b.ast().slice.value, 'b')
        self.assertEqual(str(a_sub_str_b), "a['b']")

        a_sub_three = QN(a, subscript=QN(qual_names.Literal(3)))
        self.assertEqual(a_sub_three.ast().slice.value, 3)
        self.assertEqual(str(a_sub_three), 'a[3]')

    def test_support_set(self):
        # support_set: the simple names a QN's value depends on.
        a = QN('a')
        b = QN('b')
        c = QN('c')
        a_sub_b = QN(a, subscript=b)
        a_dot_b = QN(a, attr='b')
        a_dot_b_dot_c = QN(a_dot_b, attr='c')
        a_dot_b_sub_c = QN(a_dot_b, subscript=c)

        self.assertSetEqual(a.support_set, set((a,)))
        self.assertSetEqual(a_sub_b.support_set, set((a, b)))
        self.assertSetEqual(a_dot_b.support_set, set((a,)))
        self.assertSetEqual(a_dot_b_dot_c.support_set, set((a,)))
        self.assertSetEqual(a_dot_b_sub_c.support_set, set((a, c)))

    def test_comparison(self):
        # Ordering is total across names and literals (string-based).
        less_than_apos = chr(ord('\'') - 1)
        self.assertGreater(QN('z'), QN(qual_names.Literal('a')))
        self.assertLess(QN(less_than_apos), QN(qual_names.Literal('a')))
        self.assertGreater(QN(qual_names.Literal('z')), QN(less_than_apos))
        self.assertLess(QN(qual_names.Literal('a')), QN('z'))
class QNResolverTest(unittest.TestCase):
    """Tests for ``qual_names.resolve`` annotation of parsed ASTs."""

    def assertQNStringIs(self, node, qn_str):
        self.assertEqual(str(anno.getanno(node, anno.Basic.QN)), qn_str)

    def test_resolve(self):
        samples = """
        a
        a.b
        (c, d.e)
        [f, (g.h.i)]
        j(k, l)
        """
        nodes = parser.parse(textwrap.dedent(samples), single_node=False)
        nodes = tuple(resolve(node).value for node in nodes)

        self.assertQNStringIs(nodes[0], 'a')
        self.assertQNStringIs(nodes[1], 'a.b')
        self.assertQNStringIs(nodes[2].elts[0], 'c')
        self.assertQNStringIs(nodes[2].elts[1], 'd.e')
        self.assertQNStringIs(nodes[3].elts[0], 'f')
        self.assertQNStringIs(nodes[3].elts[1], 'g.h.i')
        self.assertQNStringIs(nodes[4].func, 'j')
        self.assertQNStringIs(nodes[4].args[0], 'k')
        self.assertQNStringIs(nodes[4].args[1], 'l')

    def test_subscript_resolve(self):
        samples = """
        x[i]
        x[i.b]
        a.b[c]
        a.b[x.y]
        a[z[c]]
        a[b[c[d]]]
        a[b].c
        a.b.c[d].e.f
        a.b[c[d]].e.f
        a.b[c[d.e.f].g].h
        """
        nodes = parser.parse(textwrap.dedent(samples), single_node=False)
        nodes = tuple(resolve(node).value for node in nodes)

        self.assertQNStringIs(nodes[0], 'x[i]')
        self.assertQNStringIs(nodes[1], 'x[i.b]')
        self.assertQNStringIs(nodes[2], 'a.b[c]')
        self.assertQNStringIs(nodes[3], 'a.b[x.y]')
        self.assertQNStringIs(nodes[4], 'a[z[c]]')
        self.assertQNStringIs(nodes[5], 'a[b[c[d]]]')
        self.assertQNStringIs(nodes[6], 'a[b].c')
        self.assertQNStringIs(nodes[7], 'a.b.c[d].e.f')
        self.assertQNStringIs(nodes[8], 'a.b[c[d]].e.f')
        self.assertQNStringIs(nodes[9], 'a.b[c[d.e.f].g].h')

    def test_function_calls(self):
        # Call results are not QNs themselves; only their parts are.
        samples = """
        a.b
        a.b()
        a().b
        z[i]
        z[i]()
        z()[i]
        """
        nodes = parser.parse(textwrap.dedent(samples), single_node=False)
        nodes = tuple(resolve(node).value for node in nodes)
        self.assertQNStringIs(nodes[0], 'a.b')
        self.assertQNStringIs(nodes[1].func, 'a.b')
        self.assertQNStringIs(nodes[2].value.func, 'a')
        self.assertQNStringIs(nodes[3], 'z[i]')
        self.assertQNStringIs(nodes[4].func, 'z[i]')
        self.assertQNStringIs(nodes[5].value.func, 'z')
|
DALI-main
|
dali/test/python/autograph/pyct/test_qual_names.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ast_util module."""
import ast
import collections
import textwrap
import unittest
import gast
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import ast_util
from nvidia.dali._autograph.pyct import loader
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import pretty_printer
from nvidia.dali._autograph.pyct import qual_names
class AstUtilTest(unittest.TestCase):
def assertAstMatches(self, actual_node, expected_node_src):
    # Structural AST comparison; wraps the expected source in parentheses
    # so multi-line expressions parse.
    expected_node = gast.parse('({})'.format(expected_node_src)).body[0]
    msg = 'AST did not match expected:\n{}\nActual:\n{}'.format(
        pretty_printer.fmt(expected_node),
        pretty_printer.fmt(actual_node))
    self.assertTrue(ast_util.matches(actual_node, expected_node), msg)
def setUp(self):
    super(AstUtilTest, self).setUp()
    # Counts (target, source) pairs observed by _mock_apply_fn.
    # (``defaultdict(int)`` would be the more idiomatic zero factory.)
    self._invocation_counts = collections.defaultdict(lambda: 0)
def test_rename_symbols_basic(self):
    # Renaming a simple name; the other operand is untouched.
    node = parser.parse('a + b')
    node = qual_names.resolve(node)
    node = ast_util.rename_symbols(
        node, {qual_names.QN('a'): qual_names.QN('renamed_a')})
    source = parser.unparse(node, include_encoding_marker=False)
    expected_node_src = 'renamed_a + b'
    self.assertIsInstance(node.value.left.id, str)
    self.assertAstMatches(node, source)
    self.assertAstMatches(node, expected_node_src)
def test_rename_symbols_attributes(self):
    # A composite QN ('b.c') renames both read and write occurrences,
    # but not deeper attributes ('.d').
    node = parser.parse('b.c = b.c.d')
    node = qual_names.resolve(node)
    node = ast_util.rename_symbols(
        node, {qual_names.from_str('b.c'): qual_names.QN('renamed_b_c')})
    source = parser.unparse(node, include_encoding_marker=False)
    self.assertEqual(source.strip(), 'renamed_b_c = renamed_b_c.d')
def test_rename_symbols_nonlocal(self):
    # Names in a ``nonlocal`` declaration are renamed too.
    node = parser.parse('nonlocal a, b, c')
    node = qual_names.resolve(node)
    node = ast_util.rename_symbols(
        node, {qual_names.from_str('b'): qual_names.QN('renamed_b')})
    source = parser.unparse(node, include_encoding_marker=False)
    self.assertEqual(source.strip(), 'nonlocal a, renamed_b, c')
def test_rename_symbols_global(self):
    # Names in a ``global`` declaration are renamed too.
    node = parser.parse('global a, b, c')
    node = qual_names.resolve(node)
    node = ast_util.rename_symbols(
        node, {qual_names.from_str('b'): qual_names.QN('renamed_b')})
    source = parser.unparse(node, include_encoding_marker=False)
    self.assertEqual(source.strip(), 'global a, renamed_b, c')
def test_rename_symbols_annotations(self):
    # Renaming must carry over existing annotations by identity.
    node = parser.parse('a[i]')
    node = qual_names.resolve(node)
    anno.setanno(node, 'foo', 'bar')
    orig_anno = anno.getanno(node, 'foo')
    node = ast_util.rename_symbols(node,
                                   {qual_names.QN('a'): qual_names.QN('b')})
    self.assertIs(anno.getanno(node, 'foo'), orig_anno)
def test_rename_symbols_function(self):
    # Function names themselves can also be renamed.
    node = parser.parse('def f():\n pass')
    node = ast_util.rename_symbols(node,
                                   {qual_names.QN('f'): qual_names.QN('f1')})
    source = parser.unparse(node, include_encoding_marker=False)
    self.assertEqual(source.strip(), 'def f1():\n pass')
def test_copy_clean(self):
    # copy_clean returns a new tree without ad-hoc attributes.
    node = parser.parse(
        textwrap.dedent("""
        def f(a):
            return a + 1
        """))
    setattr(node, '__foo', 'bar')
    new_node = ast_util.copy_clean(node)
    self.assertIsNot(new_node, node)
    self.assertFalse(hasattr(new_node, '__foo'))
def test_copy_clean_preserves_annotations(self):
    # Only annotations listed in preserve_annos survive the copy.
    node = parser.parse(
        textwrap.dedent("""
        def f(a):
            return a + 1
        """))
    anno.setanno(node, 'foo', 'bar')
    anno.setanno(node, 'baz', 1)
    new_node = ast_util.copy_clean(node, preserve_annos={'foo'})
    self.assertEqual(anno.getanno(new_node, 'foo'), 'bar')
    self.assertFalse(anno.hasanno(new_node, 'baz'))
def test_keywords_to_dict(self):
    keywords = parser.parse_expression('f(a=b, c=1, d=\'e\')').keywords
    d = ast_util.keywords_to_dict(keywords)
    # Make sure we generate a usable dict node by attaching it to a variable and
    # compiling everything.
    node = parser.parse('def f(b): pass')
    node.body.append(ast.Return(d))
    result, _, _ = loader.load_ast(node)
    self.assertDictEqual(result.f(3), {'a': 3, 'c': 1, 'd': 'e'})
def assertMatch(self, target_str, pattern_str):
    # Asserts that target matches the wildcard pattern ('_' matches any).
    node = parser.parse_expression(target_str)
    pattern = parser.parse_expression(pattern_str)
    self.assertTrue(ast_util.matches(node, pattern))
def assertNoMatch(self, target_str, pattern_str):
    # Negative counterpart of assertMatch.
    node = parser.parse_expression(target_str)
    pattern = parser.parse_expression(pattern_str)
    self.assertFalse(ast_util.matches(node, pattern))
def test_matches_symbols(self):
  """'_' in a pattern is a wildcard for a single name, not a call."""
  self.assertMatch('foo', '_')
  self.assertNoMatch('foo()', '_')
  self.assertMatch('foo + bar', 'foo + _')
  self.assertNoMatch('bar + bar', 'foo + _')
  self.assertNoMatch('foo - bar', 'foo + _')
def test_matches_function_args(self):
  """'_' also matches arbitrary (possibly empty) argument lists."""
  self.assertMatch('super(Foo, self).__init__(arg1, arg2)',
                   'super(_).__init__(_)')
  self.assertMatch('super().__init__()', 'super(_).__init__(_)')
  self.assertNoMatch('super(Foo, self).bar(arg1, arg2)',
                     'super(_).__init__(_)')
  self.assertMatch('super(Foo, self).__init__()', 'super(Foo, _).__init__(_)')
  self.assertNoMatch('super(Foo, self).__init__()',
                     'super(Bar, _).__init__(_)')
def _mock_apply_fn(self, target, source):
  """Records each (target, source) pair apply_to_single_assignments emits.

  NOTE(review): assumes self._invocation_counts is a Counter-like mapping
  initialized in setUp (not visible in this chunk) — confirm.
  """
  target = parser.unparse(target, include_encoding_marker=False)
  source = parser.unparse(source, include_encoding_marker=False)
  self._invocation_counts[(target.strip(), source.strip())] += 1
def test_apply_to_single_assignments_dynamic_unpack(self):
  """Unpacking from a single value becomes indexed accesses d[0], d[1], ..."""
  node = parser.parse('a, b, c = d')
  ast_util.apply_to_single_assignments(node.targets, node.value,
                                       self._mock_apply_fn)
  self.assertDictEqual(self._invocation_counts, {
      ('a', 'd[0]'): 1,
      ('b', 'd[1]'): 1,
      ('c', 'd[2]'): 1,
  })
def test_apply_to_single_assignments_static_unpack(self):
  """Tuple-to-tuple unpacking pairs targets and values positionally."""
  node = parser.parse('a, b, c = d, e, f')
  ast_util.apply_to_single_assignments(node.targets, node.value,
                                       self._mock_apply_fn)
  self.assertDictEqual(self._invocation_counts, {
      ('a', 'd'): 1,
      ('b', 'e'): 1,
      ('c', 'f'): 1,
  })
def test_parallel_walk(self):
  """Walking a tree against itself yields identical node pairs."""
  src = """
    def f(a):
      return a + 1
  """
  node = parser.parse(textwrap.dedent(src))
  for child_a, child_b in ast_util.parallel_walk(node, node):
    self.assertEqual(child_a, child_b)
def test_parallel_walk_string_leaves(self):
  """parallel_walk also handles string leaves (e.g. names in `global`)."""
  src = """
    def f(a):
      global g
  """
  node = parser.parse(textwrap.dedent(src))
  for child_a, child_b in ast_util.parallel_walk(node, node):
    self.assertEqual(child_a, child_b)
def test_parallel_walk_inconsistent_trees(self):
  """Structurally different trees cause parallel_walk to raise ValueError."""
  node_1 = parser.parse(
      textwrap.dedent("""
    def f(a):
      return a + 1
  """))
  node_2 = parser.parse(
      textwrap.dedent("""
    def f(a):
      return a + (a * 2)
  """))
  node_3 = parser.parse(
      textwrap.dedent("""
    def f(a):
      return a + 2
  """))
  with self.assertRaises(ValueError):
    for _ in ast_util.parallel_walk(node_1, node_2):
      pass
  # There is not particular reason to reject trees that differ only in the
  # value of a constant.
  # TODO(mdan): This should probably be allowed.
  with self.assertRaises(ValueError):
    for _ in ast_util.parallel_walk(node_1, node_3):
      pass
def assertLambdaNodes(self, matching_nodes, expected_bodies):
  """Asserts all nodes are lambdas whose unparsed bodies are expected."""
  self.assertEqual(len(matching_nodes), len(expected_bodies))
  for node in matching_nodes:
    self.assertIsInstance(node, gast.Lambda)
    self.assertIn(
        parser.unparse(node.body, include_encoding_marker=False).strip(),
        expected_bodies)
|
DALI-main
|
dali/test/python/autograph/pyct/test_ast_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for templates module."""
import imp
import unittest
import gast
from nvidia.dali._autograph.pyct import gast_util
from nvidia.dali._autograph.pyct import loader
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names as qn
from nvidia.dali._autograph.pyct import templates
class _CtxClearer(gast.NodeTransformer):
  """AST visitor that wipes the `ctx` attribute from every node."""

  def visit(self, node):
    # Recurse first, then clear this node's ctx if it has one.
    super().visit(node)
    if hasattr(node, 'ctx'):
      node.ctx = None
    return node
def _parse_with_unset_ctx(expr_source):
  """Parses `expr_source` and nulls out every `ctx` field of the result."""
  parsed = parser.parse_expression(expr_source)
  _CtxClearer().visit(parsed)
  return parsed
class _CtxChecker(gast.NodeTransformer):
  """Asserts the root node's ctx is `expected_ctx` and all others gast.Load."""

  def __init__(self, test_instance, expected_ctx):
    # True only until the first node carrying a ctx is seen.
    self.at_top_level = True
    self.test_instance = test_instance
    self.expected_ctx = expected_ctx

  def visit(self, node):
    if hasattr(node, 'ctx'):
      self.test_instance.assertIsInstance(node.ctx, self.expected_ctx)
    if self.at_top_level:
      # After the top-level node, every nested ctx must be a plain Load.
      self.at_top_level = False
      self.expected_ctx = gast.Load
    return super(_CtxChecker, self).visit(node)
class TemplatesTest(unittest.TestCase):
  """Tests for templates.replace / replace_as_expression.

  The main concerns are: symbol substitution, and that substituted nodes
  receive the correct expression context (Store/Load/Param).
  """

  def assertExpectedCtxSet(self, node, ctx):
    """Assert that node has ctx=ctx at top and ctx=gast.Load everywhere else."""
    checker = _CtxChecker(self, ctx)
    checker.visit(node)

  def test_replace_tuple(self):
    template = """
      def test_fn(a, c):
        return b,
    """
    node = templates.replace(template, b=('a', 'c'))[0]
    result, _, _ = loader.load_ast(node)
    self.assertEqual((2, 3), result.test_fn(2, 3))

  def test_replace_variable(self):
    template = """
      def test_fn(a):
        a += 1
        a = 2 * a + 1
        return b
    """
    node = templates.replace(template, a='b')[0]
    result, _, _ = loader.load_ast(node)
    self.assertEqual(7, result.test_fn(2))

  def test_replace_function_name(self):
    template = """
      def fname(a):
        a += 1
        a = 2 * a + 1
        return a
    """
    node = templates.replace(template, fname='test_fn')[0]
    result, _, _ = loader.load_ast(node)
    self.assertEqual(7, result.test_fn(2))

  def test_replace_code_block(self):
    """A placeholder statement can be replaced by a list of statements."""
    template = """
      def test_fn(a):
        block
        return a
    """

    class ShouldBeReplaced(object):
      pass

    # The block is `a = a + 1` twice; any ShouldBeReplaced ctx markers must be
    # overwritten with proper contexts by templates.replace.
    node = templates.replace(
        template,
        block=[
            gast_util.compat_assign(
                [
                    gast.Name(
                        'a',
                        ctx=ShouldBeReplaced,
                        annotation=None,
                        type_comment=None)
                ],
                gast.BinOp(
                    gast.Name(
                        'a',
                        ctx=ShouldBeReplaced,
                        annotation=None,
                        type_comment=None), gast.Add(),
                    gast.Constant(1, kind=None)),
                None
            ),
        ] * 2)[0]
    result, _, _ = loader.load_ast(node)
    self.assertEqual(3, result.test_fn(1))

  def test_replace_attribute(self):
    template = """
      def test_fn(a):
        return a.foo
    """
    node = templates.replace(template, foo='b')[0]
    result, _, _ = loader.load_ast(node)
    mod = imp.new_module('test')
    mod.b = 3
    self.assertEqual(3, result.test_fn(mod))
    # Attribute placeholders can only be replaced by names, not constants.
    with self.assertRaises(ValueError):
      templates.replace(template, foo=1)

  def test_replace_attribute_context(self):
    template = """
      def test_fn(foo):
        foo = 0
    """
    node = templates.replace(
        template,
        foo=parser.parse_expression('a.b.c'))[0]
    # Only the outermost attribute is a Store; inner loads remain Load.
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].value.ctx, gast.Load)
    self.assertIsInstance(node.body[0].targets[0].value.value.ctx, gast.Load)

  def test_replace_list_context(self):
    template = """
      def test_fn(foo):
        foo = 0
    """
    node = templates.replace(template, foo=parser.parse_expression('[a, b]'))[0]
    # List assignment targets propagate Store into their elements.
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[1].ctx, gast.Store)

  def test_replace_tuple_context(self):
    template = """
      def test_fn(foo):
        foo = 0
    """
    node = templates.replace(template, foo=parser.parse_expression('(a, b)'))[0]
    # Tuple assignment targets propagate Store into their elements.
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[0].ctx, gast.Store)
    self.assertIsInstance(node.body[0].targets[0].elts[1].ctx, gast.Store)

  def test_replace_expression_context(self):
    template = """
      def test_fn():
        foo
    """
    node = templates.replace(
        template, foo=parser.parse_expression('a + 2 * b / -c'))[0]
    self.assertIsInstance(node.body[0].left.ctx, gast.Load)
    self.assertIsInstance(node.body[0].right.left.right.ctx, gast.Load)

  def test_replace_complex_context(self):
    template = """
      def test_fn():
        foo = 0
    """
    node = templates.replace(
        template, foo=parser.parse_expression('bar(([a, b],)).baz'))[0]
    # Store applies to the outer attribute only; call arguments stay Load.
    self.assertIsInstance(node.body[0].targets[0].ctx, gast.Store)
    function_call_arg = node.body[0].targets[0].value.args[0]
    self.assertIsInstance(function_call_arg.elts[0].ctx, gast.Load)
    self.assertIsInstance(function_call_arg.elts[0].elts[0].ctx, gast.Load)
    self.assertIsInstance(function_call_arg.elts[0].elts[1].ctx, gast.Load)

  def test_replace_index(self):
    template = """
      def test_fn():
        foo = 0
    """
    node = templates.replace(
        template, foo=parser.parse_expression('foo(a[b]).bar'))[0]
    function_call_arg = node.body[0].targets[0].value.args[0]
    self.assertIsInstance(function_call_arg.ctx, gast.Load)
    self.assertIsInstance(function_call_arg.slice.ctx, gast.Load)

  def test_replace_call_keyword(self):
    template = """
      def test_fn():
        def f(a, d, f):
          return a + d + f
        return f(1, kws=None)
    """
    source = parser.parse_expression('f(d=3, f=5)')
    node = templates.replace(template, kws=source.keywords)[0]
    result, _, _ = loader.load_ast(node)
    self.assertEqual(9, result.test_fn())

    with self.assertRaises(ValueError):
      templates.replace(template, kws=[])
      # NOTE(review): this second call is unreachable if the first raises,
      # as assertRaises stops at the first exception — confirm intent.
      templates.replace(template, kws=1)

  def test_replace_name_with_call(self):
    template = """
      def test_fn():
        b = 5
        def g(a):
          return 3 * a
        def f():
          return g
        return foo
    """
    source = parser.parse_expression('f()(b)')
    node = templates.replace(template, foo=source)[0]
    result, _, _ = loader.load_ast(node)
    self.assertEqual(15, result.test_fn())

  def test_replace_name_with_dict(self):
    template = """
      def test_fn():
        return foo['bar']
    """
    source = parser.parse_expression('{\'bar\': 3}')
    node = templates.replace(template, foo=source)[0]
    result, _, _ = loader.load_ast(node)
    self.assertEqual(3, result.test_fn())

  def test_replace_as_expression(self):
    template = """
      foo(a)
    """
    node = templates.replace_as_expression(template, foo='bar', a='baz')
    self.assertIsInstance(node, gast.Call)
    self.assertEqual(node.func.id, 'bar')
    self.assertEqual(node.args[0].id, 'baz')

  def test_replace_as_expression_restrictions(self):
    """replace_as_expression rejects multi-statement templates."""
    template = """
      foo(a)
      bar(b)
    """
    with self.assertRaises(ValueError):
      templates.replace_as_expression(template)

  def test_function_call_in_list(self):
    template = """
        foo(bar)
    """
    source = parser.parse_expression('[a(b(1))]')
    templates.replace_as_expression(template, bar=source)

  def test_star_comprehension_in_function_call(self):
    template = """
      a = foo(func, args)
    """
    source = parser.parse_expression('bar(*[i for i in range(j)])')
    node = templates.replace(template, func=source.func, args=source.args)
    arg_node = node[0].value.args[1].value
    # Comprehension targets are Stores; the element expression is a Load.
    self.assertIsInstance(arg_node.generators[0].target.ctx, gast.Store)
    self.assertIsInstance(arg_node.elt.ctx, gast.Load)

  def test_lambda_in_function_call(self):
    template = """
      a = foo(arg)
    """
    source = parser.parse_expression('[lambda i: i]')
    node = templates.replace(template, arg=source)
    lambda_arg = node[0].value.args[0].elts[0]
    # Lambda parameters get Param context; the body is a Load.
    self.assertIsInstance(lambda_arg.args.args[0].ctx, gast.Param)
    self.assertIsInstance(lambda_arg.body.ctx, gast.Load)

  def test_replace_name_with_subscript(self):
    template = """
        foo = bar
    """
    replacement = qn.QN(qn.QN('dictionary'), subscript=qn.QN('key'))
    node = templates.replace(template, foo=replacement)[0].targets[0]
    self.assertIsInstance(node.ctx, gast.Store)
    self.assertIsInstance(node.value.ctx, gast.Load)

  def test_replace_name_mixed_attr_subscript(self):
    """Mixed attribute/subscript replacements get Store at top, Load below."""
    template = 'foo = bar'
    for name, expression_source in [('mixed_attr_subscript', 'a.b["c"]'),
                                    ('mixed_subscript_attr', 'a[b.c]'),
                                    ('nested_subscript', 'a[b[c]]'),
                                    ('repeated_subscript', 'a[b][c]')]:
      replacement = _parse_with_unset_ctx(expression_source)
      target_node = templates.replace(template, foo=replacement)[0].targets[0]
      self.assertExpectedCtxSet(target_node, gast.Store)
      value_node = templates.replace(template, bar=replacement)[0].value
      self.assertExpectedCtxSet(value_node, gast.Load)
|
DALI-main
|
dali/test/python/autograph/pyct/test_templates.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for inspect_utils module."""
import abc
import collections
import functools
import imp
import textwrap
import unittest
import six
from nvidia.dali._autograph.pyct import inspect_utils
from nvidia.dali._autograph.pyct.testing import basic_definitions
from nvidia.dali._autograph.pyct.testing import decorators
def decorator(f):
  """A pass-through decorator: returns the decorated function unchanged."""
  return f
def function_decorator():
  """A decorator factory whose decorator is a no-op pass-through."""
  def dec(f):
    return f
  return dec
def wrapping_decorator():
  """A decorator factory whose decorator swaps the function for a stub.

  The stub always returns None; functools.wraps copies the original's
  metadata (__name__, docstring) onto the wrapper, so the substitution is
  only detectable by calling the result.
  """
  def dec(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
      # `replacement` is resolved lazily at call time (closure late binding).
      return replacement(*args, **kwargs)

    def replacement(*_):
      return None

    return wrapper
  return dec
class TestClass(object):
  """Fixture class exercising every method flavor for getmethodclass tests."""

  def member_function(self):
    pass

  @decorator
  def decorated_member(self):
    pass

  @function_decorator()
  def fn_decorated_member(self):
    pass

  @wrapping_decorator()
  def wrap_decorated_member(self):
    pass

  @staticmethod
  def static_method():
    pass

  @classmethod
  def class_method(cls):
    pass
def free_function():
  """Module-level function used as a namespace lookup target in tests."""
  pass
def factory():
  """Returns the module-level free_function (tests global resolution)."""
  return free_function
def free_factory():
  """Returns a function defined in a local scope (tests local resolution)."""
  def local_function():
    pass
  return local_function
class InspectUtilsTest(unittest.TestCase):
  """Tests for inspect_utils: lambda/namedtuple detection, source retrieval,
  namespace and qualified-name resolution, and method/class introspection."""

  def test_islambda(self):

    def test_fn():
      pass

    self.assertTrue(inspect_utils.islambda(lambda x: x))
    self.assertFalse(inspect_utils.islambda(test_fn))

  def test_islambda_renamed_lambda(self):
    """Detection must not rely on __name__ being '<lambda>'."""
    l = lambda x: 1
    l.__name__ = 'f'
    self.assertTrue(inspect_utils.islambda(l))

  def test_isnamedtuple(self):
    nt = collections.namedtuple('TestNamedTuple', ['a', 'b'])

    class NotANamedTuple(tuple):
      pass

    self.assertTrue(inspect_utils.isnamedtuple(nt))
    self.assertFalse(inspect_utils.isnamedtuple(NotANamedTuple))

  def test_isnamedtuple_confounder(self):
    """This test highlights false positives when detecting named tuples."""

    class NamedTupleLike(tuple):
      _fields = ('a', 'b')

    self.assertTrue(inspect_utils.isnamedtuple(NamedTupleLike))

  def test_isnamedtuple_subclass(self):
    """This test highlights false positives when detecting named tuples."""

    class NamedTupleSubclass(collections.namedtuple('Test', ['a', 'b'])):
      pass

    self.assertTrue(inspect_utils.isnamedtuple(NamedTupleSubclass))

  def assertSourceIdentical(self, actual, expected):
    """Compares sources after dedenting and stripping both sides."""
    self.assertEqual(
        textwrap.dedent(actual).strip(),
        textwrap.dedent(expected).strip()
    )

  def test_getimmediatesource_basic(self):
    # A decorator that does not use functools.wraps: the immediate source is
    # the wrapper's source, not the original function's.

    def test_decorator(f):

      def f_wrapper(*args, **kwargs):
        return f(*args, **kwargs)

      return f_wrapper

    expected = """
      def f_wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    """

    @test_decorator
    def test_fn(a):
      """Test docstring."""
      return [a]

    self.assertSourceIdentical(
        inspect_utils.getimmediatesource(test_fn), expected)

  def test_getimmediatesource_noop_decorator(self):
    # A pass-through decorator: the immediate source is the decorated
    # function itself, including the decorator line.

    def test_decorator(f):
      return f

    expected = '''
      @test_decorator
      def test_fn(a):
        """Test docstring."""
        return [a]
    '''

    @test_decorator
    def test_fn(a):
      """Test docstring."""
      return [a]

    self.assertSourceIdentical(
        inspect_utils.getimmediatesource(test_fn), expected)

  def test_getimmediatesource_functools_wrapper(self):
    # With functools.wraps, the immediate source is the wrapper's source.

    def wrapper_decorator(f):

      @functools.wraps(f)
      def wrapper(*args, **kwargs):
        return f(*args, **kwargs)

      return wrapper

    expected = textwrap.dedent("""
      @functools.wraps(f)
      def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    """)

    @wrapper_decorator
    def test_fn(a):
      """Test docstring."""
      return [a]

    self.assertSourceIdentical(
        inspect_utils.getimmediatesource(test_fn), expected)

  def test_getimmediatesource_functools_wrapper_different_module(self):
    expected = textwrap.dedent("""
      @functools.wraps(f)
      def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    """)

    @decorators.wrapping_decorator
    def test_fn(a):
      """Test docstring."""
      return [a]

    self.assertSourceIdentical(
        inspect_utils.getimmediatesource(test_fn), expected)

  def test_getimmediatesource_normal_decorator_different_module(self):
    expected = textwrap.dedent("""
      def standalone_wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    """)

    @decorators.standalone_decorator
    def test_fn(a):
      """Test docstring."""
      return [a]

    self.assertSourceIdentical(
        inspect_utils.getimmediatesource(test_fn), expected)

  def test_getimmediatesource_normal_functional_decorator_different_module(
      self):
    expected = textwrap.dedent("""
      def functional_wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    """)

    @decorators.functional_decorator()
    def test_fn(a):
      """Test docstring."""
      return [a]

    self.assertSourceIdentical(
        inspect_utils.getimmediatesource(test_fn), expected)

  def test_getnamespace_globals(self):
    ns = inspect_utils.getnamespace(factory)
    self.assertEqual(ns['free_function'], free_function)

  def test_getnamespace_closure_with_undefined_var(self):
    if False:  # pylint:disable=using-constant-test
      a = 1

    def test_fn():
      return a

    # Before assignment, 'a' exists in the closure cell but has no value.
    ns = inspect_utils.getnamespace(test_fn)
    self.assertNotIn('a', ns)

    a = 2
    ns = inspect_utils.getnamespace(test_fn)
    self.assertEqual(ns['a'], 2)

  def test_getnamespace_hermetic(self):

    # Intentionally hiding the global function to make sure we don't overwrite
    # it in the global namespace.
    free_function = object()  # pylint:disable=redefined-outer-name

    def test_fn():
      return free_function

    ns = inspect_utils.getnamespace(test_fn)
    globs = six.get_function_globals(test_fn)
    self.assertTrue(ns['free_function'] is free_function)
    self.assertFalse(globs['free_function'] is free_function)

  def test_getnamespace_locals(self):

    def called_fn():
      return 0

    closed_over_list = []
    closed_over_primitive = 1

    def local_fn():
      closed_over_list.append(1)
      local_var = 1
      return called_fn() + local_var + closed_over_primitive

    ns = inspect_utils.getnamespace(local_fn)
    self.assertEqual(ns['called_fn'], called_fn)
    self.assertEqual(ns['closed_over_list'], closed_over_list)
    self.assertEqual(ns['closed_over_primitive'], closed_over_primitive)
    # Purely local variables do not belong to the namespace.
    self.assertTrue('local_var' not in ns)

  def test_getqualifiedname(self):
    foo = object()
    qux = imp.new_module('quxmodule')
    bar = imp.new_module('barmodule')
    baz = object()
    bar.baz = baz

    ns = {
        'foo': foo,
        'bar': bar,
        'qux': qux,
    }

    self.assertIsNone(inspect_utils.getqualifiedname(ns, inspect_utils))
    self.assertEqual(inspect_utils.getqualifiedname(ns, foo), 'foo')
    self.assertEqual(inspect_utils.getqualifiedname(ns, bar), 'bar')
    self.assertEqual(inspect_utils.getqualifiedname(ns, baz), 'bar.baz')

  def test_getqualifiedname_efficiency(self):
    foo = object()

    # We create a densely connected graph consisting of a relatively small
    # number of modules and hide our symbol in one of them. The path to the
    # symbol is at least 10, and each node has about 10 neighbors. However,
    # by skipping visited modules, the search should take much less.
    ns = {}
    prev_level = []
    for i in range(10):
      current_level = []
      for j in range(10):
        mod_name = 'mod_{}_{}'.format(i, j)
        mod = imp.new_module(mod_name)
        current_level.append(mod)
        if i == 9 and j == 9:
          mod.foo = foo
      if prev_level:
        # All modules at level i refer to all modules at level i+1
        for prev in prev_level:
          for mod in current_level:
            prev.__dict__[mod.__name__] = mod
      else:
        for mod in current_level:
          ns[mod.__name__] = mod
      prev_level = current_level

    self.assertIsNone(inspect_utils.getqualifiedname(ns, inspect_utils))
    self.assertIsNotNone(
        inspect_utils.getqualifiedname(ns, foo, max_depth=10000000000))

  def test_getqualifiedname_cycles(self):
    foo = object()

    # We create a graph of modules that contains circular references. The
    # search process should avoid them. The searched object is hidden at the
    # bottom of a path of length roughly 10.
    ns = {}
    mods = []
    for i in range(10):
      mod = imp.new_module('mod_{}'.format(i))
      if i == 9:
        mod.foo = foo
      # Module i refers to module i+1
      if mods:
        mods[-1].__dict__[mod.__name__] = mod
      else:
        ns[mod.__name__] = mod
      # Module i refers to all modules j < i.
      for prev in mods:
        mod.__dict__[prev.__name__] = prev
      mods.append(mod)

    self.assertIsNone(inspect_utils.getqualifiedname(ns, inspect_utils))
    self.assertIsNotNone(
        inspect_utils.getqualifiedname(ns, foo, max_depth=10000000000))

  def test_getmethodclass(self):
    """getmethodclass works for every method flavor, bound or unbound."""

    self.assertEqual(
        inspect_utils.getmethodclass(free_function), None)
    self.assertEqual(
        inspect_utils.getmethodclass(free_factory()), None)

    self.assertEqual(
        inspect_utils.getmethodclass(TestClass.member_function),
        TestClass)
    self.assertEqual(
        inspect_utils.getmethodclass(TestClass.decorated_member),
        TestClass)
    self.assertEqual(
        inspect_utils.getmethodclass(TestClass.fn_decorated_member),
        TestClass)
    self.assertEqual(
        inspect_utils.getmethodclass(TestClass.wrap_decorated_member),
        TestClass)
    self.assertEqual(
        inspect_utils.getmethodclass(TestClass.static_method),
        TestClass)
    self.assertEqual(
        inspect_utils.getmethodclass(TestClass.class_method),
        TestClass)

    test_obj = TestClass()
    self.assertEqual(
        inspect_utils.getmethodclass(test_obj.member_function),
        TestClass)
    self.assertEqual(
        inspect_utils.getmethodclass(test_obj.decorated_member),
        TestClass)
    self.assertEqual(
        inspect_utils.getmethodclass(test_obj.fn_decorated_member),
        TestClass)
    self.assertEqual(
        inspect_utils.getmethodclass(test_obj.wrap_decorated_member),
        TestClass)
    self.assertEqual(
        inspect_utils.getmethodclass(test_obj.static_method),
        TestClass)
    self.assertEqual(
        inspect_utils.getmethodclass(test_obj.class_method),
        TestClass)

  def test_getmethodclass_locals(self):
    """Same as test_getmethodclass, but for classes defined in a local scope."""

    def local_function():
      pass

    class LocalClass(object):

      def member_function(self):
        pass

      @decorator
      def decorated_member(self):
        pass

      @function_decorator()
      def fn_decorated_member(self):
        pass

      @wrapping_decorator()
      def wrap_decorated_member(self):
        pass

    self.assertEqual(
        inspect_utils.getmethodclass(local_function), None)

    self.assertEqual(
        inspect_utils.getmethodclass(LocalClass.member_function),
        LocalClass)
    self.assertEqual(
        inspect_utils.getmethodclass(LocalClass.decorated_member),
        LocalClass)
    self.assertEqual(
        inspect_utils.getmethodclass(LocalClass.fn_decorated_member),
        LocalClass)
    self.assertEqual(
        inspect_utils.getmethodclass(LocalClass.wrap_decorated_member),
        LocalClass)

    test_obj = LocalClass()
    self.assertEqual(
        inspect_utils.getmethodclass(test_obj.member_function),
        LocalClass)
    self.assertEqual(
        inspect_utils.getmethodclass(test_obj.decorated_member),
        LocalClass)
    self.assertEqual(
        inspect_utils.getmethodclass(test_obj.fn_decorated_member),
        LocalClass)
    self.assertEqual(
        inspect_utils.getmethodclass(test_obj.wrap_decorated_member),
        LocalClass)

  def test_getmethodclass_callables(self):
    class TestCallable(object):

      def __call__(self):
        pass

    c = TestCallable()
    self.assertEqual(inspect_utils.getmethodclass(c), TestCallable)

  def test_getdefiningclass(self):
    class Superclass(object):

      def foo(self):
        pass

      def bar(self):
        pass

      @classmethod
      def class_method(cls):
        pass

    class Subclass(Superclass):

      def foo(self):
        pass

      def baz(self):
        pass

    # Overridden and newly defined methods belong to Subclass; inherited ones
    # resolve to Superclass.
    self.assertIs(
        inspect_utils.getdefiningclass(Subclass.foo, Subclass), Subclass)
    self.assertIs(
        inspect_utils.getdefiningclass(Subclass.bar, Subclass), Superclass)
    self.assertIs(
        inspect_utils.getdefiningclass(Subclass.baz, Subclass), Subclass)
    self.assertIs(
        inspect_utils.getdefiningclass(Subclass.class_method, Subclass),
        Superclass)

  def test_isbuiltin(self):
    self.assertTrue(inspect_utils.isbuiltin(enumerate))
    self.assertTrue(inspect_utils.isbuiltin(eval))
    self.assertTrue(inspect_utils.isbuiltin(float))
    self.assertTrue(inspect_utils.isbuiltin(int))
    self.assertTrue(inspect_utils.isbuiltin(len))
    self.assertTrue(inspect_utils.isbuiltin(range))
    self.assertTrue(inspect_utils.isbuiltin(zip))
    self.assertFalse(inspect_utils.isbuiltin(function_decorator))

  def test_isconstructor(self):
    class OrdinaryClass(object):
      pass

    class OrdinaryCallableClass(object):

      def __call__(self):
        pass

    class Metaclass(type):
      pass

    class CallableMetaclass(type):

      def __call__(cls):
        pass

    self.assertTrue(inspect_utils.isconstructor(OrdinaryClass))
    self.assertTrue(inspect_utils.isconstructor(OrdinaryCallableClass))
    self.assertTrue(inspect_utils.isconstructor(Metaclass))
    self.assertTrue(inspect_utils.isconstructor(Metaclass('TestClass', (), {})))
    self.assertTrue(inspect_utils.isconstructor(CallableMetaclass))

    # An instance of a callable metaclass routes calls through the metaclass,
    # so calling it is not construction.
    self.assertFalse(inspect_utils.isconstructor(
        CallableMetaclass('TestClass', (), {})))

  def test_isconstructor_abc_callable(self):

    @six.add_metaclass(abc.ABCMeta)
    class AbcBase(object):

      @abc.abstractmethod
      def __call__(self):
        pass

    class AbcSubclass(AbcBase):

      def __init__(self):
        pass

      def __call__(self):
        pass

    self.assertTrue(inspect_utils.isconstructor(AbcBase))
    self.assertTrue(inspect_utils.isconstructor(AbcSubclass))

  def test_getfutureimports_functions(self):
    imps = inspect_utils.getfutureimports(basic_definitions.function_with_print)
    self.assertNotIn('absolute_import', imps)
    self.assertNotIn('division', imps)
    self.assertNotIn('print_function', imps)
    self.assertNotIn('generators', imps)

  def test_getfutureimports_lambdas(self):
    imps = inspect_utils.getfutureimports(basic_definitions.simple_lambda)
    self.assertNotIn('absolute_import', imps)
    self.assertNotIn('division', imps)
    self.assertNotIn('print_function', imps)
    self.assertNotIn('generators', imps)

  def test_getfutureimports_methods(self):
    imps = inspect_utils.getfutureimports(
        basic_definitions.SimpleClass.method_with_print)
    self.assertNotIn('absolute_import', imps)
    self.assertNotIn('division', imps)
    self.assertNotIn('print_function', imps)
    self.assertNotIn('generators', imps)
|
DALI-main
|
dali/test/python/autograph/pyct/test_inspect_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cfg module."""
import unittest
import gast
from nvidia.dali._autograph.pyct import cfg
from nvidia.dali._autograph.pyct import parser
class CountingVisitor(cfg.GraphVisitor):
  """Graph visitor that records how many times each AST node is visited."""

  def __init__(self, graph):
    super(CountingVisitor, self).__init__(graph)
    # Maps AST node -> number of times visit_node saw it.
    self.counts = {}

  def init_state(self, _):
    return None

  def visit_node(self, node):
    self.counts[node.ast_node] = self.counts.get(node.ast_node, 0) + 1
    return False  # visit only once
class GraphVisitorTest(unittest.TestCase):
  """Coverage tests for forward and reverse CFG traversal."""

  def _build_cfg(self, fn):
    """Builds the CFGs for fn; returns (cfg map, parsed AST node)."""
    node, _ = parser.parse_entity(fn, future_features=())
    cfgs = cfg.build(node)
    return cfgs, node

  def test_basic_coverage_forward(self):

    def test_fn(a):
      while a > 0:
        a = 1
        break
        return a  # pylint:disable=unreachable
      a = 2

    graphs, node = self._build_cfg(test_fn)
    graph, = graphs.values()
    visitor = CountingVisitor(graph)
    visitor.visit_forward()

    self.assertEqual(visitor.counts[node.args], 1)
    self.assertEqual(visitor.counts[node.body[0].test], 1)
    self.assertEqual(visitor.counts[node.body[0].body[0]], 1)
    self.assertEqual(visitor.counts[node.body[0].body[1]], 1)
    # The return node should be unreachable in forward direction.
    self.assertNotIn(node.body[0].body[2], visitor.counts)
    self.assertEqual(visitor.counts[node.body[1]], 1)

  def test_basic_coverage_reverse(self):

    def test_fn(a):
      while a > 0:
        a = 1
        break
        return a  # pylint:disable=unreachable
      a = 2

    graphs, node = self._build_cfg(test_fn)
    graph, = graphs.values()
    visitor = CountingVisitor(graph)
    visitor.visit_reverse()

    self.assertEqual(visitor.counts[node.args], 1)
    self.assertEqual(visitor.counts[node.body[0].test], 1)
    self.assertEqual(visitor.counts[node.body[0].body[0]], 1)
    self.assertEqual(visitor.counts[node.body[0].body[1]], 1)
    # In reverse, the return node IS reachable (from the exit).
    self.assertEqual(visitor.counts[node.body[0].body[2]], 1)
    self.assertEqual(visitor.counts[node.body[1]], 1)
class AstToCfgTest(unittest.TestCase):
def _build_cfg(self, fn):
  """Parses fn and returns the map of CFGs built from its AST."""
  node, _ = parser.parse_entity(fn, future_features=())
  cfgs = cfg.build(node)
  return cfgs
def _repr_set(self, node_set):
return frozenset(repr(n) for n in node_set)
def _as_set(self, elements):
if elements is None:
return frozenset()
elif isinstance(elements, str):
return frozenset((elements,))
else:
return frozenset(elements)
def assertGraphMatches(self, graph, edges):
  """Tests whether the CFG contains the specified edges.

  Each entry of `edges` is (prev, node_repr, next): the repr of a CFG node
  and the exact sets of reprs of its predecessors and successors.
  """
  for prev, node_repr, next_ in edges:
    matched = False
    for cfg_node in graph.index.values():
      if repr(cfg_node) == node_repr:
        if (self._as_set(prev) == frozenset(map(repr, cfg_node.prev)) and
            self._as_set(next_) == frozenset(map(repr, cfg_node.next))):
          matched = True
          break
    if not matched:
      self.fail(
          'match failed for node "%s" in graph:\n%s' % (node_repr, graph))
def assertGraphEnds(self, graph, entry_repr, exit_reprs):
  """Tests whether the CFG has the specified entry and exits."""
  self.assertEqual(repr(graph.entry), entry_repr)
  self.assertSetEqual(frozenset(map(repr, graph.exit)), frozenset(exit_reprs))
def assertStatementEdges(self, graph, edges):
  """Tests whether the CFG contains the specified statement edges.

  Statement nodes are identified by '<ClassName>:<lineno>' strings; partial
  matches are collected so that a failure message can show near misses.
  """
  for prev_node_reprs, node_repr, next_node_reprs in edges:
    matched = False
    partial_matches = []
    # stmt_next and stmt_prev must index the exact same statements.
    self.assertSetEqual(
        frozenset(graph.stmt_next.keys()), frozenset(graph.stmt_prev.keys()))
    for stmt_ast_node in graph.stmt_next:
      ast_repr = '%s:%s' % (stmt_ast_node.__class__.__name__,
                            stmt_ast_node.lineno)
      if ast_repr == node_repr:
        actual_next = frozenset(map(repr, graph.stmt_next[stmt_ast_node]))
        actual_prev = frozenset(map(repr, graph.stmt_prev[stmt_ast_node]))
        partial_matches.append((actual_prev, node_repr, actual_next))
        if (self._as_set(prev_node_reprs) == actual_prev and
            self._as_set(next_node_reprs) == actual_next):
          matched = True
          break
    if not matched:
      self.fail('edges mismatch for %s: %s' % (node_repr, partial_matches))
def test_straightline(self):
  """A straight-line function yields a simple chain CFG."""

  def test_fn(a):
    a += 1
    a = 2
    a = 3
    return

  graph, = self._build_cfg(test_fn).values()

  self.assertGraphMatches(
      graph,
      (
          (None, 'a', 'a += 1'),
          ('a += 1', 'a = 2', 'a = 3'),
          ('a = 2', 'a = 3', 'return'),
          ('a = 3', 'return', None),
      ),
  )
  self.assertGraphEnds(graph, 'a', ('return',))
def test_straightline_no_return(self):
  """Without an explicit return, the last statement is the exit."""

  def test_fn(a, b):
    a = b + 1
    a += max(a)

  graph, = self._build_cfg(test_fn).values()

  self.assertGraphMatches(
      graph,
      (
          (None, 'a, b', 'a = (b + 1)'),
          ('a = (b + 1)', 'a += max(a)', None),
      ),
  )
  self.assertGraphEnds(graph, 'a, b', ('a += max(a)',))
def test_unreachable_code(self):
  """Code after return stays in the graph but has no incoming edges."""

  def test_fn(a):
    return
    a += 1  # pylint:disable=unreachable

  graph, = self._build_cfg(test_fn).values()

  self.assertGraphMatches(
      graph,
      (
          (None, 'a', 'return'),
          ('a', 'return', None),
          (None, 'a += 1', None),
      ),
  )
  # Unreachable code still counts as a (dead) exit.
  self.assertGraphEnds(graph, 'a', ('return', 'a += 1'))
def test_if_straightline(self):
  """An if/else creates a two-way branch from the test node."""

  def test_fn(a):
    if a > 0:
      a = 1
    else:
      a += -1

  graph, = self._build_cfg(test_fn).values()

  self.assertGraphMatches(
      graph,
      (
          (None, 'a', '(a > 0)'),
          ('(a > 0)', 'a = 1', None),
          ('(a > 0)', 'a += (- 1)', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (('a', 'If:2', None),),
  )
  self.assertGraphEnds(graph, 'a', ('a = 1', 'a += (- 1)'))
def test_branch_nested(self):
  """Nested if/else produces a binary tree of branches with four exits."""

  def test_fn(a):
    if a > 0:
      if a > 1:
        a = 1
      else:
        a = 2
    else:
      if a > 2:
        a = 3
      else:
        a = 4

  graph, = self._build_cfg(test_fn).values()

  self.assertGraphMatches(
      graph,
      (
          (None, 'a', '(a > 0)'),
          ('a', '(a > 0)', ('(a > 1)', '(a > 2)')),
          ('(a > 0)', '(a > 1)', ('a = 1', 'a = 2')),
          ('(a > 1)', 'a = 1', None),
          ('(a > 1)', 'a = 2', None),
          ('(a > 0)', '(a > 2)', ('a = 3', 'a = 4')),
          ('(a > 2)', 'a = 3', None),
          ('(a > 2)', 'a = 4', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (
          ('a', 'If:2', None),
          ('(a > 0)', 'If:3', None),
          ('(a > 0)', 'If:8', None),
      ),
  )
  self.assertGraphEnds(graph, 'a', ('a = 1', 'a = 2', 'a = 3', 'a = 4'))
def test_branch_straightline_unbalanced(self):
  """An if without else: the test node itself becomes a possible exit."""

  def test_fn(a):
    if a > 0:
      a = 1

  graph, = self._build_cfg(test_fn).values()

  self.assertGraphMatches(
      graph,
      (
          (None, 'a', '(a > 0)'),
          ('a', '(a > 0)', 'a = 1'),
          ('(a > 0)', 'a = 1', None),
      ),
  )
  self.assertStatementEdges(
      graph,
      (('a', 'If:2', None),),
  )
  self.assertGraphEnds(graph, 'a', ('(a > 0)', 'a = 1'))
def test_branch_return(self):
def test_fn(a):
if a > 0:
return
else:
a = 1
a = 2
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', '(a > 0)', ('return', 'a = 1')),
('(a > 0)', 'a = 1', 'a = 2'),
('(a > 0)', 'return', None),
('a = 1', 'a = 2', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'If:2', 'a = 2'),),
)
self.assertGraphEnds(graph, 'a', ('a = 2', 'return'))
def test_branch_raise(self):
def test_fn(a):
if a > 0:
raise a
else:
a = 1
a = 2
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', '(a > 0)', ('raise a', 'a = 1')),
('(a > 0)', 'a = 1', 'a = 2'),
('(a > 0)', 'raise a', None),
('a = 1', 'a = 2', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'If:2', 'a = 2'),),
)
self.assertGraphEnds(graph, 'a', ('a = 2', 'raise a'))
def test_branch_return_minimal(self):
def test_fn(a):
if a > 0:
return
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', '(a > 0)', 'return'),
('(a > 0)', 'return', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'If:2', None),),
)
self.assertGraphEnds(graph, 'a', ('(a > 0)', 'return'))
def test_while_straightline(self):
    """While: the test node has a back edge from the loop body."""
    def test_fn(a):
        while a > 0:
            a = 1
        a = 2
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
            ('(a > 0)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'While:2', 'a = 2'),),
    )
    self.assertGraphEnds(graph, 'a', ('a = 2',))

def test_while_else_straightline(self):
    """While/else: the else body runs on normal loop exit."""
    def test_fn(a):
        while a > 0:
            a = 1
        else:  # pylint:disable=useless-else-on-loop
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
            ('(a > 0)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'While:2', 'a = 3'),),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))

def test_while_else_continue(self):
    """`continue` inside while/else jumps back to the loop test."""
    def test_fn(a):
        while a > 0:
            if a > 1:
                continue
            else:
                a = 0
            a = 1
        else:  # pylint:disable=useless-else-on-loop
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'continue', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('continue', 'a = 0')),
            ('(a > 1)', 'continue', '(a > 0)'),
            ('a = 0', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', ('a = 1', '(a > 0)')),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))

def test_while_else_break(self):
    """`break` skips the else body and jumps past the loop."""
    def test_fn(a):
        while a > 0:
            if a > 1:
                break
            a = 1
        else:
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('break', 'a = 1')),
            ('(a > 1)', 'break', 'a = 3'),
            ('(a > 1)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            (('break', 'a = 2'), 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', ('a = 1', 'a = 3')),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))

def test_while_else_return(self):
    """`return` inside the loop exits the graph directly."""
    def test_fn(a):
        while a > 0:
            if a > 1:
                return
            a = 1
        else:  # pylint:disable=useless-else-on-loop
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('return', 'a = 1')),
            ('(a > 1)', 'return', None),
            ('(a > 1)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', 'a = 1'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3', 'return'))

def test_while_nested_straightline(self):
    """Nested whiles: each test node loops within its own level."""
    def test_fn(a):
        while a > 0:
            while a > 1:
                a = 1
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
            (('(a > 0)', 'a = 1'), '(a > 1)', ('a = 1', 'a = 2')),
            ('(a > 1)', 'a = 1', '(a > 1)'),
            ('(a > 1)', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))

def test_while_nested_continue(self):
    """`continue` binds to the innermost enclosing while."""
    def test_fn(a):
        while a > 0:
            while a > 1:
                if a > 3:
                    continue
                a = 1
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
            (('(a > 0)', 'continue', 'a = 1'), '(a > 1)', ('(a > 3)', 'a = 2')),
            ('(a > 1)', '(a > 3)', ('continue', 'a = 1')),
            ('(a > 3)', 'continue', '(a > 1)'),
            ('(a > 3)', 'a = 1', '(a > 1)'),
            ('(a > 1)', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
            ('(a > 1)', 'If:4', ('a = 1', '(a > 1)')),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))

def test_while_nested_break(self):
    """`break` binds to the innermost enclosing while."""
    def test_fn(a):
        while a > 0:
            while a > 1:
                if a > 2:
                    break
                a = 1
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(graph, (
        (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
        (('(a > 0)', 'a = 1'), '(a > 1)', ('(a > 2)', 'a = 2')),
        ('(a > 1)', '(a > 2)', ('break', 'a = 1')),
        ('(a > 2)', 'break', 'a = 2'),
        ('(a > 2)', 'a = 1', '(a > 1)'),
        (('(a > 1)', 'break'), 'a = 2', '(a > 0)'),
        ('(a > 0)', 'a = 3', None),
    ))
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
            ('(a > 1)', 'If:4', ('a = 1', 'a = 2')),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))
def test_for_straightline(self):
    """For: the iterator expression node plays the role of the loop test."""
    def test_fn(a):
        for a in range(0, a):
            a = 1
        a = 2
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
            ('range(0, a)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'For:2', 'a = 2'),),
    )
    self.assertGraphEnds(graph, 'a', ('a = 2',))

def test_for_else_straightline(self):
    """For/else: the else body runs on normal loop exit."""
    def test_fn(a):
        for a in range(0, a):
            a = 1
        else:  # pylint:disable=useless-else-on-loop
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
            ('range(0, a)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'For:2', 'a = 3'),),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))

def test_for_else_continue(self):
    """`continue` inside for/else jumps back to the iterator node."""
    def test_fn(a):
        for a in range(0, a):
            if a > 1:
                continue
            else:
                a = 0
            a = 1
        else:  # pylint:disable=useless-else-on-loop
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'continue', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('continue', 'a = 0')),
            ('(a > 1)', 'continue', 'range(0, a)'),
            ('(a > 1)', 'a = 0', 'a = 1'),
            ('a = 0', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', ('a = 1', 'range(0, a)')),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))

def test_for_else_break(self):
    """`break` skips the else body and jumps past the loop."""
    def test_fn(a):
        for a in range(0, a):
            if a > 1:
                break
            a = 1
        else:
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('break', 'a = 1')),
            ('(a > 1)', 'break', 'a = 3'),
            ('(a > 1)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            (('break', 'a = 2'), 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', ('a = 1', 'a = 3')),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))

def test_for_else_return(self):
    """`return` inside the for body exits the graph directly."""
    def test_fn(a):
        for a in range(0, a):
            if a > 1:
                return
            a = 1
        else:  # pylint:disable=useless-else-on-loop
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('return', 'a = 1')),
            ('(a > 1)', 'return', None),
            ('(a > 1)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', 'a = 1'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3', 'return'))

def test_for_nested_straightline(self):
    """Nested fors: each iterator node loops within its own level."""
    def test_fn(a):
        for a in range(0, a):
            for b in range(1, a):
                b += 1
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'b += 1'), 'range(1, a)', ('b += 1', 'a = 2')),
            ('range(1, a)', 'b += 1', 'range(1, a)'),
            ('range(1, a)', 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))

def test_for_nested_continue(self):
    """`continue` binds to the innermost enclosing for."""
    def test_fn(a):
        for a in range(0, a):
            for b in range(1, a):
                if a > 3:
                    continue
                b += 1
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'continue', 'b += 1'), 'range(1, a)',
             ('(a > 3)', 'a = 2')),
            ('range(1, a)', '(a > 3)', ('continue', 'b += 1')),
            ('(a > 3)', 'continue', 'range(1, a)'),
            ('(a > 3)', 'b += 1', 'range(1, a)'),
            ('range(1, a)', 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
            ('range(1, a)', 'If:4', ('b += 1', 'range(1, a)')),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))

def test_for_nested_break(self):
    """`break` binds to the innermost enclosing for."""
    def test_fn(a):
        for a in range(0, a):
            for b in range(1, a):
                if a > 2:
                    break
                b += 1
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'b += 1'), 'range(1, a)', ('(a > 2)', 'a = 2')),
            ('range(1, a)', '(a > 2)', ('break', 'b += 1')),
            ('(a > 2)', 'break', 'a = 2'),
            ('(a > 2)', 'b += 1', 'range(1, a)'),
            (('range(1, a)', 'break'), 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
            ('range(1, a)', 'If:4', ('b += 1', 'a = 2')),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))
def test_complex(self):
    """Mixed while/for/if with break, continue and return in one function."""
    def test_fn(a):
        b = 0
        while a > 0:
            for b in range(0, a):
                if a > 2:
                    break
                if a > 3:
                    if a > 4:
                        continue
                    else:
                        max(a)
                    break
                b += 1
            else:  # for b in range(0, a):
                return a
            a = 2
        for a in range(1, a):
            return b
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('b = 0', 'a = 2'), '(a > 0)', ('range(0, a)', 'range(1, a)')),
            (
                ('(a > 0)', 'continue', 'b += 1'),
                'range(0, a)',
                ('(a > 2)', 'return a'),
            ),
            ('range(0, a)', '(a > 2)', ('(a > 3)', 'break')),
            ('(a > 2)', 'break', 'a = 2'),
            ('(a > 2)', '(a > 3)', ('(a > 4)', 'b += 1')),
            ('(a > 3)', '(a > 4)', ('continue', 'max(a)')),
            ('(a > 4)', 'max(a)', 'break'),
            ('max(a)', 'break', 'a = 2'),
            ('(a > 4)', 'continue', 'range(0, a)'),
            ('(a > 3)', 'b += 1', 'range(0, a)'),
            ('range(0, a)', 'return a', None),
            ('break', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'range(1, a)', ('return b', 'a = 3')),
            ('range(1, a)', 'return b', None),
            ('range(1, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('b = 0', 'While:3', 'range(1, a)'),
            ('(a > 0)', 'For:4', 'a = 2'),
            ('range(0, a)', 'If:5', ('(a > 3)', 'a = 2')),
            ('(a > 2)', 'If:7', ('b += 1', 'a = 2', 'range(0, a)')),
            ('(a > 3)', 'If:8', ('a = 2', 'range(0, a)')),
            ('(a > 0)', 'For:17', 'a = 3'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3', 'return a', 'return b'))
def test_finally_straightline(self):
    """try/finally with no exception: edges pass straight through."""
    def test_fn(a):
        try:
            a += 1
        finally:
            a = 2
        a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a += 1', 'a = 2'),
            ('a += 1', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 3',))

def test_return_finally(self):
    """`return` in a try: the finally body still runs before exit."""
    def test_fn(a):
        try:
            return a
        finally:
            a = 1
        a = 2
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'return a', 'a = 1'),
            ('return a', 'a = 1', None),
            (None, 'a = 2', None),
        ),
    )
    # Note, `a = 1` executes after `return a`.
    self.assertGraphEnds(graph, 'a', ('a = 2', 'a = 1'))

def test_break_finally(self):
    """`break` in a try routes through the finally body."""
    def test_fn(a):
        while a > 0:
            try:
                break
            finally:
                a = 1
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', 'break'),
            ('(a > 0)', 'break', 'a = 1'),
            ('break', 'a = 1', None),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('(a > 0)', 'a = 1'))

def test_continue_finally(self):
    """`continue` in a try routes through the finally body back to the test."""
    def test_fn(a):
        while a > 0:
            try:
                continue
            finally:
                a = 1
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', 'continue'),
            ('(a > 0)', 'continue', 'a = 1'),
            ('continue', 'a = 1', '(a > 0)'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('(a > 0)',))

def test_with_straightline(self):
    """`with`: the context expression is an ordinary statement node."""
    def test_fn(a):
        with max(a) as b:
            a = 0
            return b
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'max(a)', 'a = 0'),
            ('max(a)', 'a = 0', 'return b'),
            ('a = 0', 'return b', None),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('return b',))
def test_lambda_basic(self):
    """A lambda gets its own CFG, keyed by its gast.Lambda node."""
    def test_fn(a):
        a = lambda b: a + b
        return a
    graphs = self._build_cfg(test_fn)
    for k, v in graphs.items():
        # _build_cfg returns one graph per function-like node.
        if isinstance(k, gast.Lambda):
            lam_graph = v
        else:
            fn_graph = v
    self.assertGraphMatches(
        fn_graph,
        (
            ('a', '(lambda b: (a + b))', 'a = (lambda b: (a + b))'),
            ('(lambda b: (a + b))', 'a = (lambda b: (a + b))', 'return a'),
            ('a = (lambda b: (a + b))', 'return a', None),
        ),
    )
    self.assertGraphEnds(fn_graph, 'a', ('return a',))
    self.assertGraphMatches(
        lam_graph,
        (
            ('b', '(a + b)', None),
        ),
    )
    self.assertGraphEnds(lam_graph, 'b', ('(a + b)',))

def test_lambda_in_return(self):
    """A lambda inside a return statement still gets a separate CFG."""
    def test_fn(a):
        return lambda b: a + b
    graphs = self._build_cfg(test_fn)
    for k, v in graphs.items():
        if isinstance(k, gast.Lambda):
            lam_graph = v
        else:
            fn_graph = v
    self.assertGraphMatches(
        fn_graph,
        (
            ('a', '(lambda b: (a + b))', 'return (lambda b: (a + b))'),
            ('(lambda b: (a + b))', 'return (lambda b: (a + b))', None),
        ),
    )
    self.assertGraphEnds(fn_graph, 'a', ('return (lambda b: (a + b))',))
    self.assertGraphMatches(
        lam_graph,
        (
            ('b', '(a + b)', None),
        ),
    )
    self.assertGraphEnds(lam_graph, 'b', ('(a + b)',))

def test_lambda_in_while_loop_test(self):
    """A lambda used in a while test is re-evaluated on each iteration."""
    def test_fn(a):
        while (lambda b: a + b)(a):
            pass
    graphs = self._build_cfg(test_fn)
    for k, v in graphs.items():
        if isinstance(k, gast.Lambda):
            lam_graph = v
        else:
            fn_graph = v
    self.assertGraphMatches(
        fn_graph,
        (
            ('a', '(lambda b: (a + b))', '(lambda b: (a + b))(a)'),
            (('(lambda b: (a + b))', 'pass'), '(lambda b: (a + b))(a)', 'pass'),
            ('(lambda b: (a + b))(a)', 'pass', '(lambda b: (a + b))(a)'),
        ),
    )
    self.assertGraphEnds(fn_graph, 'a', ('(lambda b: (a + b))(a)',))
    self.assertGraphMatches(
        lam_graph,
        (
            ('b', '(a + b)', None),
        ),
    )
    self.assertGraphEnds(lam_graph, 'b', ('(a + b)',))

def test_lambda_in_for_loop_test(self):
    """A lambda used in a for iterator expression gets its own CFG."""
    def test_fn(a):
        for _ in (lambda b: a + b)(a):
            pass
    graphs = self._build_cfg(test_fn)
    for k, v in graphs.items():
        if isinstance(k, gast.Lambda):
            lam_graph = v
        else:
            fn_graph = v
    self.assertGraphMatches(
        fn_graph,
        (
            ('a', '(lambda b: (a + b))', '(lambda b: (a + b))(a)'),
            (('(lambda b: (a + b))', 'pass'), '(lambda b: (a + b))(a)', 'pass'),
            ('(lambda b: (a + b))(a)', 'pass', '(lambda b: (a + b))(a)'),
        ),
    )
    self.assertGraphEnds(fn_graph, 'a', ('(lambda b: (a + b))(a)',))
    self.assertGraphMatches(
        lam_graph,
        (
            ('b', '(a + b)', None),
        ),
    )
    self.assertGraphEnds(lam_graph, 'b', ('(a + b)',))
def test_pass(self):
    """A lone `pass` forms a single node graph."""
    def test_fn(a):  # pylint:disable=unused-argument
        pass
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'pass', None),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('pass',))
def test_try_finally(self):
    """try/finally statement span covers both bodies."""
    def test_fn(a):
        try:
            a = 1
        finally:
            a = 2
        return a
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', 'a = 2'),
            ('a = 1', 'a = 2', 'return a'),
            ('a = 2', 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('return a',))

def test_try_except_single_bare(self):
    """A bare except handler is a possible successor of the try body."""
    def test_fn(a):
        try:
            a = 1
            a = 2
        except:  # pylint:disable=bare-except  # noqa: E722
            a = 3
        return a
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', 'a = 2'),
            ('a = 2', 'a = 3', 'return a'),
            (('a = 2', 'a = 3'), 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 2', 'ExceptHandler:5', 'return a'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('return a',))

def test_try_except_single(self):
    """A typed except handler is a possible successor of the try body."""
    def test_fn(a):
        try:
            a = 1
            a = 2
        except Exception1:  # pylint:disable=undefined-variable  # noqa: F821
            a = 3
        return a
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', 'a = 2'),
            ('a = 2', 'a = 3', 'return a'),
            (('a = 2', 'a = 3'), 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 2', 'ExceptHandler:5', 'return a'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('return a',))

def test_try_except_single_aliased(self):
    """`except ... as e` handlers are treated like plain handlers."""
    def test_fn(a):
        try:
            a = 1
        except Exception1 as e:  # pylint:disable=undefined-variable,unused-variable  # noqa: F821,F841
            a = 2
        return a
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', ('a = 2', 'return a')),
            (('a = 1', 'a = 2'), 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 1', 'ExceptHandler:4', 'return a'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('return a',))

def test_try_except_single_tuple_aliased(self):
    """Tuple exception clauses behave like a single typed handler."""
    def test_fn(a):
        try:
            a = 1
        except (Exception1, Exception2) as e:  # pylint:disable=undefined-variable,unused-variable  # noqa: F821,F841,E501
            a = 2
        return a
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', ('a = 2', 'return a')),
            (('a = 1', 'a = 2'), 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 1', 'ExceptHandler:4', 'return a'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('return a',))

def test_try_except_multiple(self):
    """Multiple handlers: the try body can branch to each of them."""
    def test_fn(a):
        try:
            a = 1
        except Exception1:  # pylint:disable=undefined-variable  # noqa: F821
            a = 2
        except Exception2:  # pylint:disable=undefined-variable  # noqa: F821
            a = 3
        return a
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', ('a = 2', 'a = 3', 'return a')),
            (('a = 1', 'a = 2', 'a = 3'), 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 1', 'ExceptHandler:4', 'return a'),
            ('a = 1', 'ExceptHandler:6', 'return a'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('return a',))

def test_try_except_finally(self):
    """Handlers and the try body all feed into the finally body."""
    def test_fn(a):
        try:
            a = 1
        except Exception1:  # pylint:disable=undefined-variable  # noqa: F821
            a = 2
        except Exception2:  # pylint:disable=undefined-variable  # noqa: F821
            a = 3
        finally:
            a = 4
        return a
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'a = 1', ('a = 2', 'a = 3', 'a = 4')),
            (('a = 1', 'a = 2', 'a = 3'), 'a = 4', 'return a'),
            ('a = 4', 'return a', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'return a'),
            ('a = 1', 'ExceptHandler:4', 'a = 4'),
            ('a = 1', 'ExceptHandler:6', 'a = 4'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('return a',))
def test_try_in_if(self):
    """An if nested in a try: both branches may reach the handler."""
    def test_fn(a):
        try:
            if a > 0:
                a = 1
            else:
                a = 2
        except Exception1:  # pylint:disable=undefined-variable  # noqa: F821
            a = 3
        a = 4
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', ('a = 1', 'a = 2')),
            ('(a > 0)', 'a = 1', ('a = 3', 'a = 4')),
            ('(a > 0)', 'a = 2', ('a = 3', 'a = 4')),
            (('a = 1', 'a = 2'), 'a = 3', 'a = 4'),
            (('a = 1', 'a = 2', 'a = 3'), 'a = 4', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'Try:2', 'a = 4'),
            ('a', 'If:3', ('a = 3', 'a = 4')),
            (('a = 1', 'a = 2'), 'ExceptHandler:7', 'a = 4'),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('a = 4',))

def test_try_in_if_all_branches_exit(self):
    """All branches raise or return: the handler is reachable via raise."""
    def test_fn(a, b):
        try:
            if a > 0:
                raise b
            else:
                return 0
        except b:
            return 1
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a, b', '(a > 0)', ('raise b', 'return 0')),
            ('(a > 0)', 'raise b', 'return 1'),
            ('(a > 0)', 'return 0', None),
            ('raise b', 'return 1', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a, b', 'Try:2', None),
            ('a, b', 'If:3', 'return 1'),
            ('raise b', 'ExceptHandler:7', None),
        ),
    )
    self.assertGraphEnds(graph, 'a, b', ('return 0', 'return 1', 'raise b'))
def test_raise_exits(self):
    """`raise` is a graph exit; code after it is unreachable."""
    def test_fn(a, b):
        raise b
        return a  # pylint:disable=unreachable
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a, b', 'raise b', None),
            (None, 'return a', None),
        ),
    )
    self.assertGraphEnds(graph, 'a, b', ('raise b', 'return a'))

def test_raise_triggers_enclosing_finally(self):
    """`raise` routes through every enclosing finally body."""
    def test_fn(a):
        try:
            try:
                raise a
                return 1  # pylint:disable=unreachable
            finally:
                b = 1
            return 2
        finally:
            b = 2
        return b
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'raise a', 'b = 1'),
            (('raise a', 'return 1'), 'b = 1', 'b = 2'),
            (None, 'return 1', 'b = 1'),
            (None, 'return 2', 'b = 2'),
            (('return 2', 'b = 1'), 'b = 2', None),
            (None, 'return b', None),
        ),
    )
    self.assertGraphEnds(
        graph, 'a', ('return b', 'b = 2'))

# NOTE(review): "sortcuts" is presumably a typo for "shortcuts"; the name is
# kept unchanged to match the upstream TensorFlow test it was ported from.
def test_raise_adds_finally_sortcuts(self):
    """A conditional raise adds shortcut edges into the finally bodies."""
    def test_fn(a):
        try:
            try:
                if a > 0:
                    raise a
                c = 1
            finally:
                b = 1
            c = 2
        finally:
            b = 2
        return b, c
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', ('raise a', 'c = 1')),
            ('(a > 0)', 'raise a', 'b = 1'),
            ('(a > 0)', 'c = 1', 'b = 1'),
            (('raise a', 'c = 1'), 'b = 1', ('c = 2', 'b = 2')),
            ('b = 1', 'c = 2', 'b = 2'),
            (('b = 1', 'c = 2'), 'b = 2', 'return (b, c)'),
            ('b = 2', 'return (b, c)', None),
        ),
    )
    self.assertGraphEnds(
        graph, 'a', ('return (b, c)', 'b = 2'))

def test_raise_exits_via_except(self):
    """A raise may exit through any matching handler or the finally body."""
    def test_fn(a, b):
        try:
            raise b
        except a:
            c = 1
        except b:
            c = 2
        finally:
            c += 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a, b', 'raise b', ('c = 1', 'c = 2', 'c += 3')),
            ('raise b', 'c = 1', 'c += 3'),
            ('raise b', 'c = 2', 'c += 3'),
            (('raise b', 'c = 1', 'c = 2'), 'c += 3', None),
        ),
    )
    self.assertGraphEnds(graph, 'a, b', ('c += 3',))
def test_list_comprehension(self):
    """A comprehension is a single statement node, not a loop."""
    def test_fn(a):
        c = [b for b in a]
        return c
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', 'c = [b for b in a]', 'return c'),
            ('c = [b for b in a]', 'return c', None),
        ),
    )
    self.assertGraphEnds(graph, 'a', ('return c',))

def test_class_definition_empty(self):
    """A class definition collapses into a single `class C` node."""
    def test_fn(a, b):
        class C(a(b)):
            pass
        return C
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a, b', 'class C', 'return C'),
            ('class C', 'return C', None),
        ),
    )
    self.assertGraphEnds(graph, 'a, b', ('return C',))

def test_class_definition_with_members(self):
    """Class members do not add nodes to the enclosing function's CFG."""
    def test_fn(a, b):
        class C(a(b)):
            d = 1
        return C
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a, b', 'class C', 'return C'),
            ('class C', 'return C', None),
        ),
    )
    self.assertGraphEnds(graph, 'a, b', ('return C',))

def test_import(self):
    """An import statement is an ordinary CFG node."""
    def test_fn():
        from a import b  # pylint:disable=g-import-not-at-top
        return b
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('', 'from a import b', 'return b'),
            ('from a import b', 'return b', None),
        ),
    )
    self.assertGraphEnds(graph, '', ('return b',))
|
DALI-main
|
dali/test/python/autograph/pyct/test_cfg.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for transpiler module."""
import threading
import unittest
import gast
from nvidia.dali._autograph.pyct import transformer
from nvidia.dali._autograph.pyct import transpiler
class FlipSignTransformer(transformer.Base):
    """AST transformer that rewrites every binary `+` into binary `-`.

    Used by the transpiler tests below to make the effect of a transform
    observable in the transformed function's return values.
    """

    def visit_BinOp(self, node):
        # Only Add is rewritten; every other binary operator passes through.
        if isinstance(node.op, gast.Add):
            node.op = gast.Sub()
        return self.generic_visit(node)
class TestTranspiler(transpiler.PyToPy):
    """Minimal PyToPy transpiler that applies FlipSignTransformer."""

    def get_caching_key(self, ctx):
        """All contexts share a single cache entry."""
        del ctx
        return 0

    def get_extra_locals(self):
        """No extra names are injected into the transformed namespace."""
        return {}

    def transform_ast(self, node, ctx):
        """Flip the sign of every addition in the AST."""
        return FlipSignTransformer(ctx).visit(node)
# Module-level globals exercised by PyToPyTest.test_global and
# PyToPyTest.test_namespace_collisions_avoided below.
global_var_for_test_global = 1
global_var_for_test_namespace_collisions = object()
class PyToPyTest(unittest.TestCase):
    """Tests for transpiler.PyToPy via the sign-flipping TestTranspiler.

    Each test transforms a function (every `+` becomes `-`) and checks the
    transformed result, closure/global/default binding behavior, caching,
    reentrancy and thread safety.
    """

    def test_basic(self):
        """A plain function is transformed: a + 1 becomes a - 1."""
        def f(a):
            return a + 1
        tr = TestTranspiler()
        f, _, _ = tr.transform(f, None)
        self.assertEqual(f(1), 0)

    def test_closure(self):
        """The transformed function keeps live references to closure cells."""
        b = 1
        def f(a):
            return a + b
        tr = TestTranspiler()
        f, _, _ = tr.transform(f, None)
        self.assertEqual(f(1), 0)
        b = 2
        self.assertEqual(f(1), -1)

    def test_global(self):
        """The transformed function sees updates to module globals."""
        def f(a):
            return a + global_var_for_test_global
        tr = TestTranspiler()
        f, _, _ = tr.transform(f, None)
        global global_var_for_test_global
        global_var_for_test_global = 1
        self.assertEqual(f(1), 0)
        global_var_for_test_global = 2
        self.assertEqual(f(1), -1)

    def test_defaults(self):
        """Default argument values are captured at (re)definition time."""
        b = 2
        c = 1
        def f(a, d=c + 1):
            return a + b + d
        tr = TestTranspiler()
        f, _, _ = tr.transform(f, None)
        self.assertEqual(f(1), 1 - 2 - 2)
        c = 0
        self.assertEqual(f(1), 1 - 2 - 2)  # Defaults are evaluated at definition.
        b = 1
        self.assertEqual(f(1), 1 - 2 - 1)

    def test_call_tree(self):
        """Only the entry function is converted, not its callees."""
        def g(a):
            return a + 1
        def f(a):
            return g(a) + 1
        tr = TestTranspiler()
        f, _, _ = tr.transform(f, None)
        self.assertEqual(f(1), 1 - 1 + 1)  # Only f is converted.

    def test_lambda(self):
        """Lambdas are transformed and keep their closure bindings."""
        b = 2
        f = lambda x: (b + (x if x > 0 else -x))
        tr = TestTranspiler()
        f, _, _ = tr.transform(f, None)
        self.assertEqual(f(1), 2 - 1)
        self.assertEqual(f(-1), 2 - 1)
        b = 3
        self.assertEqual(f(1), 3 - 1)
        self.assertEqual(f(-1), 3 - 1)

    def test_multiple_lambdas(self):
        """Several lambdas on one line are disambiguated by argument names."""
        a, b = 1, 2
        # This can be disambiguated by the argument names.
        f, _ = (lambda x: a + x, lambda y: b * y)
        tr = TestTranspiler()
        f, _, _ = tr.transform(f, None)
        self.assertEqual(f(1), 1 - 1)

    def test_nested_functions(self):
        """Inner function definitions are transformed with the outer one."""
        b = 2
        def f(x):
            def g(x):
                return b + x
            return g(x)
        tr = TestTranspiler()
        f, _, _ = tr.transform(f, None)
        self.assertEqual(f(1), 2 - 1)

    def test_nested_lambda(self):
        """Lambdas nested inside a transformed function are transformed too."""
        b = 2
        def f(x):
            g = lambda x: b + x
            return g(x)
        tr = TestTranspiler()
        f, _, _ = tr.transform(f, None)
        self.assertEqual(f(1), 2 - 1)

    def test_concurrency(self):
        """Concurrent transforms of the same function share one module."""
        def f():
            pass
        outputs = []
        tr = TestTranspiler()
        # Note: this is not a test, it's a required invariant.
        assert tr.get_caching_key(None) == tr.get_caching_key(None)
        def conversion_thread():
            _, mod, _ = tr.transform(f, None)
            outputs.append(mod.__name__)
        threads = tuple(
            threading.Thread(target=conversion_thread) for _ in range(10))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        # Races would potentially create multiple functions / modules
        # (non-deterministically, but with high likelihood).
        self.assertEqual(len(set(outputs)), 1)

    def test_reentrance(self):
        """transform() may be called again from within transform_ast."""
        def test_fn():
            return 1 + 1
        class ReentrantTranspiler(transpiler.PyToPy):
            def __init__(self):
                super(ReentrantTranspiler, self).__init__()
                self._recursion_depth = 0
            def get_caching_key(self, ctx):
                del ctx
                return 0
            def get_extra_locals(self):
                return {}
            def transform_ast(self, node, ctx):
                self._recursion_depth += 1
                # Trigger one nested transform to exercise reentrancy.
                if self._recursion_depth < 2:
                    self.transform(test_fn, None)
                return FlipSignTransformer(ctx).visit(node)
        tr = ReentrantTranspiler()
        f, _, _ = tr.transform(test_fn, None)
        self.assertEqual(f(), 0)

    def test_namespace_collisions_avoided(self):
        """A method name that collides with a global must not shadow it."""
        class TestClass(object):
            def global_var_for_test_namespace_collisions(self):
                return global_var_for_test_namespace_collisions
        tr = TestTranspiler()
        obj = TestClass()
        f, _, _ = tr.transform(
            obj.global_var_for_test_namespace_collisions, None)
        self.assertIs(f(obj), global_var_for_test_namespace_collisions)
|
DALI-main
|
dali/test/python/autograph/pyct/test_transpiler.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pretty_printer module."""
import ast
import textwrap
import unittest
from nvidia.dali._autograph.pyct import pretty_printer
class PrettyPrinterTest(unittest.TestCase):
    """Smoke tests for pretty_printer.fmt on parsed and hand-built ASTs."""

    def test_unicode_bytes(self):
        """fmt handles bytes and unicode string literals without crashing."""
        source = textwrap.dedent('''
            def f():
              return b'b', u'u', 'depends_py2_py3'
            ''')
        node = ast.parse(source)
        self.assertIsNotNone(pretty_printer.fmt(node))

    def test_format(self):
        """fmt handles a manually constructed (partial) FunctionDef node."""
        # Use ast.Constant / ast.Load instead of the legacy ast.Num and
        # ast.Param nodes, which were deprecated (3.8/3.9) and removed in
        # Python 3.12; fmt walks node fields generically, so the test's
        # intent is unchanged.
        node = ast.FunctionDef(
            name='f',
            args=ast.arguments(
                args=[ast.Name(id='a', ctx=ast.Load())],
                vararg=None,
                kwarg=None,
                defaults=[]),
            body=[
                ast.Return(
                    ast.BinOp(
                        op=ast.Add(),
                        left=ast.Name(id='a', ctx=ast.Load()),
                        right=ast.Constant(1)))
            ],
            decorator_list=[],
            returns=None)
        # Just checking for functionality, the color control characters make it
        # difficult to inspect the result.
        self.assertIsNotNone(pretty_printer.fmt(node))
|
DALI-main
|
dali/test/python/autograph/pyct/test_pretty_printer.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/test/python/autograph/pyct/__init__.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for error_utils module."""
import re
import unittest
from nvidia.dali._autograph.pyct import error_utils
from nvidia.dali._autograph.pyct import origin_info
class ErrorMetadataBaseTest(unittest.TestCase):
def test_create_exception_default_constructor(self):
    """An exception type with a default constructor is re-created with the cause message."""
    class CustomError(Exception):
        pass
    em = error_utils.ErrorMetadataBase(
        callsite_tb=(),
        cause_metadata=None,
        cause_message='test message',
        source_map={},
        converter_filename=None)
    exc = em.create_exception(CustomError())
    self.assertIsInstance(exc, CustomError)
    self.assertIn('test message', str(exc))
def test_create_exception_custom_constructor(self):
    """When the exception constructor takes no message, None is returned."""
    class CustomError(Exception):
        def __init__(self):
            super(CustomError, self).__init__('test_message')
    em = error_utils.ErrorMetadataBase(
        callsite_tb=(),
        cause_metadata=None,
        cause_message='test message',
        source_map={},
        converter_filename=None)
    exc = em.create_exception(CustomError())
    self.assertIsNone(exc)
def test_get_message_no_code(self):
callsite_tb = [
('/path/one.py', 11, 'test_fn_1', None),
('/path/two.py', 171, 'test_fn_2', 'test code'),
]
cause_message = 'Test message'
em = error_utils.ErrorMetadataBase(
callsite_tb=callsite_tb,
cause_metadata=None,
cause_message=cause_message,
source_map={},
converter_filename=None)
self.assertRegex(
em.get_message(),
re.compile(('"/path/one.py", line 11, in test_fn_1.*'
'"/path/two.py", line 171, in test_fn_2.*'
'Test message'), re.DOTALL))
def test_get_message_converted_code(self):
callsite_tb = [
('/path/one.py', 11, 'test_fn_1', 'test code 1'),
('/path/two.py', 171, 'test_fn_2', 'test code 2'),
('/path/three.py', 171, 'test_fn_3', 'test code 3'),
]
cause_message = 'Test message'
loc = origin_info.LineLocation(filename='/path/other_two.py', lineno=13)
origin_info_value = origin_info.OriginInfo(
loc=loc,
function_name='converted_fn',
source_code_line='converted test code',
comment=None)
em = error_utils.ErrorMetadataBase(
callsite_tb=callsite_tb,
cause_metadata=None,
cause_message=cause_message,
source_map={
origin_info.LineLocation(filename='/path/two.py', lineno=171): origin_info_value
},
converter_filename=None)
result = em.get_message()
self.assertRegex(
result,
re.compile((r'converted_fn \*.*'
r'"/path/three.py", line 171, in test_fn_3.*'
r'Test message'), re.DOTALL))
self.assertNotRegex(result, re.compile('test_fn_1'))
def test_get_message_call_overload(self):
callsite_tb = [
('/path/one.py', 11, 'test_fn_1', 'test code 1'),
('/path/two.py', 0, 'test_fn_2', 'test code 2'),
('/path/three.py', 171, 'test_fn_3', 'test code 3'),
]
cause_message = 'Test message'
em = error_utils.ErrorMetadataBase(
callsite_tb=callsite_tb,
cause_metadata=None,
cause_message=cause_message,
source_map={},
converter_filename='/path/two.py')
self.assertRegex(
em.get_message(),
re.compile((r'"/path/one.py", line 11, in test_fn_1.*'
r'"/path/three.py", line 171, in test_fn_3 \*\*.*'
r'Test message'), re.DOTALL))
|
DALI-main
|
dali/test/python/autograph/pyct/test_error_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for origin_info module."""
import inspect
import sys
import textwrap
import unittest
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import inspect_utils
from nvidia.dali._autograph.pyct import origin_info
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct.testing import basic_definitions
class OriginInfoTest(unittest.TestCase):
  """Tests for the origin_info module.

  NOTE(review): many assertions below depend on the exact source text of
  pyct.testing.basic_definitions; keep the two files in sync.
  """

  def test_create_source_map(self):
    source = """
        def test_fn(x):
          return x + 1
    """
    source = textwrap.dedent(source)
    node = parser.parse(source)
    # Annotate the root node with a synthetic origin; create_source_map is
    # expected to key it by the node's location in the generated code.
    fake_origin = origin_info.OriginInfo(
        loc=origin_info.Location('fake_filename', 3, 7),
        function_name='fake_function_name',
        source_code_line='fake source line',
        comment=None)
    anno.setanno(node, anno.Basic.ORIGIN, fake_origin)
    source_map = origin_info.create_source_map(node, source, 'test_filename')

    # Line 2 of the dedented source is the `def` line.
    loc = origin_info.LineLocation('test_filename', 2)
    self.assertIn(loc, source_map)
    self.assertIs(source_map[loc], fake_origin)

  def _create_source_map(self, test_fn):
    """Builds a generated-to-original source map for test_fn."""
    node, source = parser.parse_entity(test_fn, ())
    origin_info.resolve_entity(node, source, test_fn)
    # Creating a source map with the source code as output will create
    # an identity map.
    return origin_info.create_source_map(node, source, 'test_filename')

  def test_create_source_map_identity(self):
    test_fn = basic_definitions.simple_function
    source_map = self._create_source_map(test_fn)
    module_path = inspect.getsourcefile(test_fn)

    # Origin line numbers below should match those in basic_definitions.py
    fn_start = inspect.getsourcelines(test_fn)[1]

    definition_loc = origin_info.LineLocation('test_filename', 1)
    self.assertIn(definition_loc, source_map)
    self.assertEqual(source_map[definition_loc].loc.lineno, fn_start)
    self.assertEqual(source_map[definition_loc].loc.filename, module_path)
    self.assertEqual(source_map[definition_loc].function_name,
                     'simple_function')

  def test_create_source_map_multiline_call(self):
    test_fn = basic_definitions.function_with_multiline_call
    source_map = self._create_source_map(test_fn)
    module_path = inspect.getsourcefile(test_fn)

    # Origin line numbers below should match those in basic_definitions.py
    fn_start = inspect.getsourcelines(test_fn)[1]

    call_loc = origin_info.LineLocation('test_filename', 3)
    self.assertIn(call_loc, source_map)
    self.assertEqual(source_map[call_loc].loc.lineno, fn_start + 2)
    self.assertEqual(source_map[call_loc].loc.filename, module_path)
    self.assertEqual(source_map[call_loc].function_name,
                     'function_with_multiline_call')
    self.assertEqual(source_map[call_loc].source_code_line, '  return range(')

    second_arg_loc = origin_info.LineLocation('test_filename', 5)
    self.assertIn(second_arg_loc, source_map)
    self.assertEqual(source_map[second_arg_loc].loc.lineno, fn_start + 4)
    self.assertEqual(source_map[second_arg_loc].loc.filename, module_path)
    self.assertEqual(source_map[second_arg_loc].function_name,
                     'function_with_multiline_call')
    self.assertEqual(source_map[second_arg_loc].source_code_line,
                     '      x + 1,')

  def test_create_source_map_no_origin_info(self):
    test_fn = basic_definitions.simple_function
    node, _ = parser.parse_entity(test_fn,
                                  inspect_utils.getfutureimports(test_fn))
    # No origin information should result in an empty map.
    test_fn_lines, _ = inspect.getsourcelines(test_fn)
    source_map = origin_info.create_source_map(node, '\n'.join(test_fn_lines),
                                               test_fn)

    self.assertEqual(source_map, {})

  def test_resolve(self):
    source = """
        def test_fn(x):
          '''Docstring.'''
          return x  # comment
    """
    source = textwrap.dedent(source)
    node = parser.parse(source)
    # Resolve with a synthetic context location of line 10, column 10; the
    # offsets asserted below are relative to that base.
    origin_info.resolve(node, source, 'test_file', 10, 10)

    def_origin = anno.getanno(node, anno.Basic.ORIGIN)
    self.assertEqual(def_origin.loc.filename, 'test_file')
    self.assertEqual(def_origin.loc.lineno, 10)
    self.assertEqual(def_origin.loc.col_offset, 10)
    self.assertEqual(def_origin.source_code_line, 'def test_fn(x):')
    self.assertIsNone(def_origin.comment)

    docstring_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
    # Fixed copy-paste bug: the original re-checked def_origin here.
    self.assertEqual(docstring_origin.loc.filename, 'test_file')
    self.assertEqual(docstring_origin.loc.lineno, 11)
    self.assertEqual(docstring_origin.loc.col_offset, 12)
    self.assertEqual(docstring_origin.source_code_line, "  '''Docstring.'''")
    self.assertIsNone(docstring_origin.comment)

    ret_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
    # Fixed copy-paste bug: the original re-checked def_origin here.
    self.assertEqual(ret_origin.loc.filename, 'test_file')
    self.assertEqual(ret_origin.loc.lineno, 12)
    self.assertEqual(ret_origin.loc.col_offset, 12)
    self.assertEqual(ret_origin.source_code_line, '  return x  # comment')
    self.assertEqual(ret_origin.comment, 'comment')

  def test_resolve_with_trailing_garbage(self):
    # This comment will be missed because the tokenizer fails to reach it.
    source = ' lambda: foo([], bar=1)), baz=2)()'
    clean_source = 'lambda: foo([], bar=1)'
    node = parser.parse(clean_source).value
    origin_info.resolve(node, source, 'test_file', 10, 10)

    def_origin = anno.getanno(node, anno.Basic.ORIGIN)
    self.assertEqual(def_origin.loc.lineno, 10)
    self.assertEqual(def_origin.loc.col_offset, 10)
    self.assertEqual(def_origin.source_code_line, source)
    self.assertIsNone(def_origin.comment)

  def test_resolve_entity(self):
    test_fn = basic_definitions.simple_function
    node, source = parser.parse_entity(
        test_fn, inspect_utils.getfutureimports(test_fn))
    origin_info.resolve_entity(node, source, test_fn)

    # The line numbers below should match those in basic_definitions.py
    fn_start = inspect.getsourcelines(test_fn)[1]

    def_origin = anno.getanno(node, anno.Basic.ORIGIN)
    self.assertEqual(def_origin.loc.lineno, fn_start)
    self.assertEqual(def_origin.loc.col_offset, 0)
    self.assertEqual(def_origin.source_code_line, 'def simple_function(x):')
    self.assertIsNone(def_origin.comment)

    docstring_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
    self.assertEqual(docstring_origin.loc.lineno, fn_start + 1)
    self.assertEqual(docstring_origin.loc.col_offset, 2)
    self.assertEqual(docstring_origin.source_code_line, '  """Docstring."""')
    self.assertIsNone(docstring_origin.comment)

    ret_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
    self.assertEqual(ret_origin.loc.lineno, fn_start + 2)
    self.assertEqual(ret_origin.loc.col_offset, 2)
    self.assertEqual(ret_origin.source_code_line, '  return x  # comment')
    self.assertEqual(ret_origin.comment, 'comment')

  def test_resolve_entity_nested_function(self):
    test_fn = basic_definitions.nested_functions
    node, source = parser.parse_entity(
        test_fn, inspect_utils.getfutureimports(test_fn))
    origin_info.resolve_entity(node, source, test_fn)

    # The line numbers below should match those in basic_definitions.py
    fn_start = inspect.getsourcelines(test_fn)[1]

    inner_def_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
    self.assertEqual(inner_def_origin.loc.lineno, fn_start + 3)
    self.assertEqual(inner_def_origin.loc.col_offset, 2)
    self.assertEqual(inner_def_origin.source_code_line, '  def inner_fn(y):')
    self.assertIsNone(inner_def_origin.comment)

    inner_ret_origin = anno.getanno(node.body[1].body[0], anno.Basic.ORIGIN)
    self.assertEqual(inner_ret_origin.loc.lineno, fn_start + 4)
    self.assertEqual(inner_ret_origin.loc.col_offset, 4)
    self.assertEqual(inner_ret_origin.source_code_line, '    return y')
    self.assertIsNone(inner_ret_origin.comment)

  def test_resolve_entity_indented_block(self):
    test_fn = basic_definitions.SimpleClass.simple_method
    node, source = parser.parse_entity(test_fn,
                                       inspect_utils.getfutureimports(test_fn))
    origin_info.resolve_entity(node, source, test_fn)

    # The line numbers below should match those in basic_definitions.py
    fn_start = inspect.getsourcelines(test_fn)[1]

    def_origin = anno.getanno(node, anno.Basic.ORIGIN)
    self.assertEqual(def_origin.loc.lineno, fn_start)
    # col_offset reflects the original file; source_code_line is dedented.
    self.assertEqual(def_origin.loc.col_offset, 2)
    self.assertEqual(def_origin.source_code_line, 'def simple_method(self):')
    self.assertIsNone(def_origin.comment)

    ret_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
    self.assertEqual(ret_origin.loc.lineno, fn_start + 1)
    self.assertEqual(ret_origin.loc.col_offset, 4)
    self.assertEqual(ret_origin.source_code_line, '  return self')
    self.assertIsNone(ret_origin.comment)

  def test_resolve_entity_decorated_function(self):
    test_fn = basic_definitions.decorated_function
    node, source = parser.parse_entity(test_fn,
                                       inspect_utils.getfutureimports(test_fn))
    origin_info.resolve_entity(node, source, test_fn)

    # The line numbers below should match those in basic_definitions.py
    fn_start = inspect.getsourcelines(test_fn)[1]

    def_origin = anno.getanno(node, anno.Basic.ORIGIN)
    if sys.version_info >= (3, 8):
      # Python 3.8+ attributes the FunctionDef node to the `def` line rather
      # than to the first decorator.
      self.assertEqual(def_origin.loc.lineno, fn_start + 2)
      self.assertEqual(def_origin.source_code_line,
                       'def decorated_function(x):')
    else:
      self.assertEqual(def_origin.loc.lineno, fn_start)
      self.assertEqual(def_origin.source_code_line, '@basic_decorator')
    self.assertEqual(def_origin.loc.col_offset, 0)
    self.assertIsNone(def_origin.comment)

    if_origin = anno.getanno(node.body[0], anno.Basic.ORIGIN)
    self.assertEqual(if_origin.loc.lineno, fn_start + 3)
    self.assertEqual(if_origin.loc.col_offset, 2)
    self.assertEqual(if_origin.source_code_line, '  if x > 0:')
    self.assertIsNone(if_origin.comment)

    ret1_origin = anno.getanno(node.body[0].body[0], anno.Basic.ORIGIN)
    self.assertEqual(ret1_origin.loc.lineno, fn_start + 4)
    self.assertEqual(ret1_origin.loc.col_offset, 4)
    self.assertEqual(ret1_origin.source_code_line, '    return 1')
    self.assertIsNone(ret1_origin.comment)

    ret2_origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
    self.assertEqual(ret2_origin.loc.lineno, fn_start + 5)
    self.assertEqual(ret2_origin.loc.col_offset, 2)
    self.assertEqual(ret2_origin.source_code_line, '  return 2')
    self.assertIsNone(ret2_origin.comment)
|
DALI-main
|
dali/test/python/autograph/pyct/test_origin_info.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for naming module."""
import unittest
from nvidia.dali._autograph.pyct import naming
class NamerTest(unittest.TestCase):
    """Checks symbol generation and conflict avoidance in naming.Namer."""

    def test_new_symbol_tracks_names(self):
        # Each generated symbol is recorded in generated_names.
        symbol_namer = naming.Namer({})
        self.assertEqual('temp', symbol_namer.new_symbol('temp', set()))
        self.assertEqual(('temp',),
                         tuple(sorted(symbol_namer.generated_names)))

    def test_new_symbol_avoids_duplicates(self):
        # Requesting the same base name twice yields distinct symbols.
        symbol_namer = naming.Namer({})
        first = symbol_namer.new_symbol('temp', set())
        second = symbol_namer.new_symbol('temp', set())
        self.assertEqual('temp', first)
        self.assertEqual('temp_1', second)
        self.assertEqual(('temp', 'temp_1'),
                         tuple(sorted(symbol_namer.generated_names)))

    def test_new_symbol_avoids_conflicts(self):
        symbol_namer = naming.Namer({'temp': 1})
        # temp is reserved in the global namespace
        self.assertEqual('temp_1', symbol_namer.new_symbol('temp', set()))
        # temp_2 is reserved in the local namespace
        self.assertEqual('temp_3',
                         symbol_namer.new_symbol('temp', set(('temp_2',))))
        self.assertEqual(('temp_1', 'temp_3'),
                         tuple(sorted(symbol_namer.generated_names)))
|
DALI-main
|
dali/test/python/autograph/pyct/test_naming.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anno module."""
import ast
import unittest
from nvidia.dali._autograph.pyct import anno
# TODO(mdan): Consider strong types instead of primitives.
class AnnoTest(unittest.TestCase):
    """Checks the get/set/copy/dup annotation operations on AST nodes."""

    def test_basic(self):
        target = ast.Name()

        # A fresh node carries no annotations.
        self.assertEqual(anno.keys(target), set())
        self.assertFalse(anno.hasanno(target, 'foo'))
        with self.assertRaises(AttributeError):
            anno.getanno(target, 'foo')

        anno.setanno(target, 'foo', 3)
        self.assertEqual(anno.keys(target), {'foo'})
        self.assertTrue(anno.hasanno(target, 'foo'))
        self.assertEqual(anno.getanno(target, 'foo'), 3)
        # Missing keys fall back to the supplied default value.
        self.assertEqual(anno.getanno(target, 'bar', default=7), 7)

        anno.delanno(target, 'foo')
        self.assertEqual(anno.keys(target), set())
        self.assertFalse(anno.hasanno(target, 'foo'))
        with self.assertRaises(AttributeError):
            anno.getanno(target, 'foo')
        self.assertIsNone(anno.getanno(target, 'foo', default=None))

    def test_copy(self):
        source_node = ast.Name()
        anno.setanno(source_node, 'foo', 3)

        dest_node = ast.Name()
        anno.copyanno(source_node, dest_node, 'foo')
        # Copying an annotation that is absent on the source is a no-op.
        anno.copyanno(source_node, dest_node, 'bar')

        self.assertTrue(anno.hasanno(dest_node, 'foo'))
        self.assertFalse(anno.hasanno(dest_node, 'bar'))

    def test_duplicate(self):
        branch = ast.If(
            test=ast.Num(1),
            body=[ast.Expr(ast.Name('bar', ast.Load()))],
            orelse=[])
        anno.setanno(branch, 'spam', 1)
        anno.setanno(branch, 'ham', 1)
        anno.setanno(branch.body[0], 'ham', 1)

        # dup duplicates 'spam' as 'eggs' on the root node; child nodes are
        # left untouched.
        anno.dup(branch, {'spam': 'eggs'})

        self.assertTrue(anno.hasanno(branch, 'spam'))
        self.assertTrue(anno.hasanno(branch, 'ham'))
        self.assertTrue(anno.hasanno(branch, 'eggs'))
        self.assertFalse(anno.hasanno(branch.body[0], 'eggs'))
|
DALI-main
|
dali/test/python/autograph/pyct/test_anno.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.