python_code | repo_name | file_path
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import itertools
import cv2
import numpy as np
from nvidia.dali import pipeline_def, fn, types
from test_utils import get_dali_extra_path, check_batch
from nose2.tools import params
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
def equalize_cv_baseline(img, layout):
if layout == "HW":
return cv2.equalizeHist(img)
if layout == "HWC":
img = img.transpose(2, 0, 1)
axis = 2
else:
assert layout == "CHW", f"{layout}"
axis = 0
return np.stack([cv2.equalizeHist(channel) for channel in img], axis=axis)
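# For reference, a minimal NumPy sketch of the classic histogram-equalization
# mapping that cv2.equalizeHist computes for a single uint8 channel. This is an
# illustration only (OpenCV's exact rounding may differ slightly), which is why
# the tests in this file use OpenCV itself as the baseline.
def _equalize_hist_sketch(channel):
    # histogram and cumulative distribution of the 8-bit values
    hist = np.bincount(channel.ravel(), minlength=256)
    cdf = np.cumsum(hist)
    cdf_min = cdf[cdf > 0][0]  # first non-empty bin
    # classic equalization LUT: rescale the CDF to the full 0..255 range
    lut = np.round((cdf - cdf_min) / max(channel.size - cdf_min, 1) * 255)
    return np.clip(lut, 0, 255).astype(np.uint8)[channel]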
@pipeline_def
def images_pipeline(layout, dev):
images, _ = fn.readers.file(name="Reader", file_root=images_dir, prefetch_queue_depth=2,
random_shuffle=True, seed=42)
decoder = "mixed" if dev == "gpu" else "cpu"
if layout == "HW":
images = fn.decoders.image(images, device=decoder, output_type=types.GRAY)
images = fn.squeeze(images, axes=2)
else:
assert layout in ["HWC", "CHW"], f"{layout}"
images = fn.decoders.image(images, device=decoder, output_type=types.RGB)
if layout == "CHW":
images = fn.transpose(images, perm=[2, 0, 1])
equalized = fn.experimental.equalize(images)
return equalized, images
@params(*tuple(
itertools.product(("cpu", "gpu"),
(("HWC", 1), ("HWC", 32), ("CHW", 1), ("CHW", 7), ("HW", 253), ("HW", 128)))))
def test_image_pipeline(dev, layout_batch_size):
layout, batch_size = layout_batch_size
num_iters = 2
pipe = images_pipeline(num_threads=4, device_id=0, batch_size=batch_size, layout=layout,
dev=dev)
pipe.build()
for _ in range(num_iters):
equalized, imgs = pipe.run()
if dev == "gpu":
imgs = imgs.as_cpu()
equalized = equalized.as_cpu()
equalized = [np.array(img) for img in equalized]
imgs = [np.array(img) for img in imgs]
assert len(equalized) == len(imgs)
baseline = [equalize_cv_baseline(img, layout) for img in imgs]
check_batch(equalized, baseline, max_allowed_error=1)
@params(("cpu", ), ("gpu", ))
def test_multichannel(dev):
sizes = [(200, 300), (700, 500), (1024, 200), (200, 1024), (1024, 1024)]
num_channels = [1, 2, 3, 4, 5, 13]
# keep len(sizes) and len(num_channels) co-prime to have all combinations
assert math.gcd(len(sizes), len(num_channels)) == 1
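    # (with co-prime lengths, (idx % len(sizes), idx % len(num_channels)) cycles
    # through every (size, num_channels) pair over one batch, by the CRT)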
batch_size = len(sizes) * len(num_channels)
rng = np.random.default_rng(424242)
num_iters = 2
def input_sample(sample_info):
idx_in_batch = sample_info.idx_in_batch
size = sizes[idx_in_batch % len(sizes)]
num_channel = num_channels[idx_in_batch % len(num_channels)]
shape = (size[0], size[1], num_channel)
return np.uint8(rng.uniform(0, 255, shape))
@pipeline_def(batch_size=batch_size, device_id=0, num_threads=4, seed=42)
def pipeline():
input = fn.external_source(input_sample, batch=False)
if dev == "gpu":
input = input.gpu()
return fn.experimental.equalize(input), input
pipe = pipeline()
pipe.build()
for _ in range(num_iters):
equalized, imgs = pipe.run()
if dev == "gpu":
imgs = imgs.as_cpu()
equalized = equalized.as_cpu()
equalized = [np.array(img) for img in equalized]
imgs = [np.array(img) for img in imgs]
assert len(equalized) == len(imgs)
baseline = [equalize_cv_baseline(img, "HWC") for img in imgs]
check_batch(equalized, baseline, max_allowed_error=1)
DALI-main | dali/test/python/operator_2/test_equalize.py
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import numpy as np
from nvidia.dali import pipeline_def, fn, types
from test_utils import get_dali_extra_path, np_type_to_dali, check_batch
from nose2.tools import params
from nose.plugins.attrib import attr
from filter_test_utils import filter_baseline, filter_baseline_layout
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
def filter_enumerated(shape):
n = np.prod(shape)
weight_sum = n * (n + 1) / 2
return np.float32(1 / weight_sum * np.arange(1, n + 1).reshape(shape))
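# (the weights above are 1, 2, ..., n scaled by 1/(1 + 2 + ... + n), so they sum
# to exactly 1 and the filter acts as a weighted average of its footprint)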
def create_filter_anchor_source(shapes):
rng = np.random.default_rng(42)
def source(sample_info):
shape_idx = sample_info.idx_in_batch % len(shapes)
shape = shapes[shape_idx]
anchor = np.int32(rng.uniform(0, shape))
for dim in range(len(anchor)):
if rng.uniform(0, 4) >= 3:
anchor[dim] = -1
anchor = np.array(anchor, dtype=np.int32)
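        # (a -1 anchor requests the operator's default for that dimension; per
        # DALI's filter docs that centers the filter over the processed element)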
return filter_enumerated(shape), anchor
return source
def create_sample_source(shapes, dtype):
rng = np.random.default_rng(42)
if not np.issubdtype(dtype, np.integer):
low, high = 0, 1
else:
type_info = np.iinfo(dtype)
low, high = type_info.min, type_info.max
def source(sample_info):
shape_idx = sample_info.idx_in_batch % len(shapes)
shape = shapes[shape_idx]
return dtype(rng.uniform(low, high, shape))
return source
@pipeline_def
def images_pipeline(dev, shapes, border, in_dtype, mode):
images, _ = fn.readers.file(name="Reader", file_root=images_dir, prefetch_queue_depth=2,
random_shuffle=True, seed=42)
images = fn.experimental.decoders.image(images, device="cpu", output_type=types.RGB,
dtype=np_type_to_dali(in_dtype))
if dev == "gpu":
images = images.gpu()
filters, anchors = fn.external_source(source=create_filter_anchor_source(shapes), batch=False,
num_outputs=2)
fill_val_limit = 1 if not np.issubdtype(in_dtype, np.integer) else np.iinfo(in_dtype).max
fill_values = fn.random.uniform(range=[0, fill_val_limit], dtype=np_type_to_dali(in_dtype))
if border == "constant":
convolved = fn.experimental.filter(images, filters, fill_values, anchor=anchors,
border=border, mode=mode)
else:
convolved = fn.experimental.filter(images, filters, anchor=anchors, border=border,
mode=mode)
return convolved, images, filters, anchors, fill_values
@pipeline_def
def sample_pipeline(sample_shapes, sample_layout, filter_shapes, border, in_dtype, mode, dev):
samples = fn.external_source(source=create_sample_source(sample_shapes, in_dtype), batch=False,
layout=sample_layout)
filters, anchors = fn.external_source(source=create_filter_anchor_source(filter_shapes),
batch=False, num_outputs=2)
fill_val_limit = 1 if not np.issubdtype(in_dtype, np.integer) else np.iinfo(in_dtype).max
rand_fill_dtype = in_dtype if in_dtype != np.float16 else np.float32
fill_values = fn.random.uniform(range=[0, fill_val_limit],
dtype=np_type_to_dali(rand_fill_dtype))
fill_values = fn.cast_like(fill_values, samples)
in_samples = samples.gpu() if dev == "gpu" else samples
if border == "constant":
convolved = fn.experimental.filter(in_samples, filters, fill_values, anchor=anchors,
border=border, mode=mode)
else:
convolved = fn.experimental.filter(in_samples, filters, anchor=anchors, border=border,
mode=mode)
return convolved, samples, filters, anchors, fill_values
@attr('scipy')
@params(*tuple((dev, ) + params for dev, params in itertools.product(
["cpu", "gpu"],
[
(np.uint8, 16, "101", "same"),
(np.uint8, 11, "clamp", "same"),
(np.uint8, 4, "constant", "same"),
(np.int8, 7, "1001", "same"),
(np.int8, 5, "wrap", "same"),
(np.int16, 8, "wrap", "same"),
(np.int16, 1, "constant", "same"),
(np.uint16, 5, "clamp", "same"),
(np.uint16, 2, "1001", "same"),
(np.float32, 11, "constant", "same"),
(np.float32, 13, "101", "same"),
(np.uint8, 4, "constant", "valid"),
(np.float32, 7, "101", "valid"),
],
    # for CPU, skip int8 and border-wrap cases, as those are not supported by OpenCV's filter2D
) if dev != "cpu" or params[0] != np.int8))
def test_image_pipeline(dev, dtype, batch_size, border, mode):
shapes = [(3, 3), (8, 8), (31, 1), (1, 31), (1, 1), (51, 3), (3, 51), (2, 40), (2, 40), (2, 2),
(27, 27)]
num_iters = 2
pipe = images_pipeline(batch_size=batch_size, num_threads=4, device_id=0, border=border,
in_dtype=dtype, shapes=shapes, mode=mode, dev=dev)
pipe.build()
atol = 1 if np.issubdtype(dtype, np.integer) else 1e-5
for _ in range(num_iters):
filtered_imgs, imgs, kernels, anchors, fill_values = pipe.run()
if dev == "gpu":
filtered_imgs = filtered_imgs.as_cpu()
imgs = imgs.as_cpu()
filtered_imgs = [np.array(img) for img in filtered_imgs]
imgs = [np.array(img) for img in imgs]
kernels = [np.array(kernel) for kernel in kernels]
anchors = [np.array(anchor) for anchor in anchors]
fill_values = [np.array(fv) for fv in fill_values]
assert len(filtered_imgs) == len(imgs) == len(kernels) == len(anchors) == len(fill_values)
baseline = [
filter_baseline(img, kernel, anchor, border, fill_value, mode, has_channels=True)
for img, kernel, anchor, fill_value in zip(imgs, kernels, anchors, fill_values)
]
check_batch(filtered_imgs, baseline, max_allowed_error=atol)
sample_2d_cases = tuple((dev, ) + params for dev, params in itertools.product(
["cpu", "gpu"],
[
(np.float16, "HWC", [(501, 127, 3), (600, 600, 1), (128, 256, 5), (200, 500, 2)],
[(3, 3), (8, 5), (10, 4), (70, 1), (1, 70)], 8, "101", "same"),
(np.int8, "HWC", [(403, 201, 150), (128, 256, 5), (200, 500, 70)],
[(3, 3), (31, 2), (2, 32), (5, 5)], 4, "101", "same"),
(np.uint8, "HW", [(501, 127), (600, 600), (128, 256), (200, 500)],
[(3, 3), (8, 5), (10, 4), (70, 1), (1, 70)], 8, "1001", "same"),
(np.float16, "CHW", [(3, 501, 127), (1, 600, 600), (10, 1026, 741), (7, 200, 500)],
[(3, 3), (8, 5), (10, 4), (70, 1), (1, 70)], 8, "wrap", "same"),
(np.uint8, "CHW", [(3, 501, 127), (1, 600, 600), (10, 1026, 741), (7, 200, 500)],
[(3, 3), (8, 5), (10, 4), (70, 1), (1, 70)], 8, "wrap", "valid"),
(np.uint16, "FCHW", [(4, 3, 501, 127), (5, 1, 600, 600), (2, 10, 1026, 741), (1, 7, 200,
500)],
[(3, 3), (8, 5), (10, 4), (70, 1), (1, 70)], 8, "clamp", "same"),
(np.int8, "HWC", [(501, 127, 3), (4096, 1, 3), (1, 4096, 2), (1, 1, 1)],
[(3, 3), (8, 5), (10, 4), (70, 1), (1, 70)], 8, "wrap", "same"),
(np.float32, "HW", [(1024 * 1024, 1), (1, 1024 * 1024)],
[(256, 1), (1, 256), (1, 257), (257, 1)], 4, "101", "same"),
],
) if dev == "gpu" or (params[0] not in [np.float16, np.int8]))
sample_3d_cases = (
("gpu", np.uint8, "DHWC", [(300, 300, 300, 3), (128, 256, 50, 1), (200, 200, 200, 2),
(128, 64, 50, 1)], [(3, 3, 3), (31, 1, 1), (1, 31, 1),
(1, 1, 31)], 4, "101", "same"),
("gpu", np.float32, "CDHW", [(3, 300, 300, 300),
(1, 128, 256, 50)], [(4, 3, 2), (7, 1, 2)], 2, "1001", "same"),
("gpu", np.uint16, "DHW", [(300, 300, 300), (128, 256, 50),
(200, 500, 200)], [(3, 3, 3), (31, 1, 1), (1, 31, 1),
(1, 1, 31)], 4, "constant", "same"),
("gpu", np.float32, "DHW", [(1024 * 1024, 1, 1), (1, 1024 * 1024, 1),
(1, 1, 1024 * 1024)], [(256, 1, 1), (1, 257, 1),
(1, 1, 258)], 3, "101", "same"),
("gpu", np.float32, "DHW", [(1024 * 1024, 1, 1), (1, 1024 * 1024, 1),
(1, 1, 1024 * 1024)], [(1, 256, 1), (1, 1, 257),
(258, 1, 1)], 3, "101", "same"),
)
@attr('scipy')
@attr('slow')
@params(*(sample_2d_cases + sample_3d_cases))
def slow_test_samples(dev, dtype, sample_layout, sample_shapes, filter_shapes, batch_size, border,
mode):
num_iters = 2
pipe = sample_pipeline(batch_size=batch_size, num_threads=4, device_id=0,
sample_shapes=sample_shapes, sample_layout=sample_layout,
filter_shapes=filter_shapes, border=border, in_dtype=dtype, mode=mode,
dev=dev)
pipe.build()
if dtype == np.float32:
atol = 1e-5
elif dtype == np.float16:
atol = 1e-2
else:
assert np.issubdtype(dtype, np.integer)
atol = 1
for _ in range(num_iters):
flt_samples, samples, kernels, anchors, fill_values = pipe.run()
if dev == "gpu":
flt_samples = flt_samples.as_cpu()
flt_samples = [np.array(img) for img in flt_samples]
samples = [np.array(sample) for sample in samples]
kernels = [np.array(kernel) for kernel in kernels]
anchors = [np.array(anchor) for anchor in anchors]
fill_values = [np.array(fv) for fv in fill_values]
assert len(flt_samples) == len(samples) == len(kernels) == len(anchors) == len(fill_values)
baseline = [
filter_baseline_layout(sample_layout, sample, kernel, anchor, border, fill_value, mode)
for sample, kernel, anchor, fill_value in zip(samples, kernels, anchors, fill_values)
]
check_batch(flt_samples, baseline, max_allowed_error=atol)
DALI-main | dali/test/python/operator_2/test_filter.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia import dali
from nvidia.dali import fn
import numpy as np
from nvidia.dali.pipeline import Pipeline
from nose_utils import assert_raises
@dali.pipeline_def(batch_size=2, num_threads=3, device_id=0)
def index_pipe(data_source, indexing_func):
src = data_source
cpu = indexing_func(src)
gpu = indexing_func(src.gpu())
return src, cpu, gpu
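# index_pipe applies the same indexing function to a CPU and a GPU copy of the
# source, so each test below can check both backends against a NumPy reference.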
def test_plain_indexing():
data = [np.float32([[0, 1, 2], [3, 4, 5]]), np.float32([[0, 1], [2, 3], [4, 5]])]
src = fn.external_source(lambda: data, layout="AB")
pipe = index_pipe(src, lambda x: x[1, 1])
pipe.build()
inp, cpu, gpu = pipe.run()
for i in range(len(inp)):
x = inp.at(i)
assert np.array_equal(x[1, 1], cpu.at(i))
assert np.array_equal(x[1, 1], gpu.as_cpu().at(i))
def _test_indexing(
data_gen, input_layout, output_layout, dali_index_func, ref_index_func=None
):
src = fn.external_source(data_gen, layout=input_layout)
pipe = index_pipe(src, dali_index_func)
pipe.build()
inp, cpu, gpu = pipe.run()
for i in range(len(inp)):
x = inp.at(i)
ref = (ref_index_func or dali_index_func)(x)
assert np.array_equal(ref, cpu.at(i))
assert np.array_equal(ref, gpu.as_cpu().at(i))
assert cpu.layout() == output_layout
assert gpu.layout() == output_layout
def test_constant_ranges():
def data_gen():
return [
np.float32([[0, 1, 2], [3, 4, 5]]),
np.float32([[0, 1], [2, 3], [4, 5]]),
]
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[1:, :2], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[-1:, :-2], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[:-1, :-1], None
yield _test_indexing, data_gen, "AB", "B", lambda x: x[1, :2], None
yield _test_indexing, data_gen, "AB", "B", lambda x: x[1, :-2], None
yield _test_indexing, data_gen, "AB", "A", lambda x: x[:-1, -1], None
yield _test_indexing, data_gen, "AB", "A", lambda x: x[:-1, 0], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[::-1, :], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[::-2, :], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[::2, :], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[:, ::-1], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[:, ::2], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[:, ::-2], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[:, 1::-2], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[:, -2::-1], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[:-2:2, :], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[:, 2::-2], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[:, :1:-1], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[::2, ::2], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[::-1, ::-1], None
yield _test_indexing, data_gen, "AB", "AB", lambda x: x[::-2, ::-2], None
def test_swapped_ends():
data = [np.uint8([1, 2, 3]), np.uint8([1, 2])]
src = fn.external_source(lambda: data)
pipe = index_pipe(src, lambda x: x[2:1])
pipe.build()
inp, cpu, gpu = pipe.run()
for i in range(len(inp)):
x = inp.at(i)
assert np.array_equal(x[2:1], cpu.at(i))
assert np.array_equal(x[2:1], gpu.as_cpu().at(i))
def test_noop():
node = dali.types.Constant(np.float32([1, 2, 2]))
indexed = node[:]
assert "SubscriptDimCheck" in indexed.name
def test_runtime_indexing():
def data_gen():
return [
np.float32([[0, 1, 2], [3, 4, 5]]),
np.float32([[0, 1], [2, 3], [4, 5]]),
]
src = fn.external_source(data_gen)
lo_idxs = [np.array(x, dtype=np.int64) for x in [1, -5, 0, 2, -2, 1]]
hi_idxs = [np.array(x, dtype=np.int16) for x in [5, -1, 1, 2, 4]]
lo0 = fn.external_source(source=lo_idxs, batch=False, cycle=True)
hi1 = fn.external_source(source=hi_idxs, batch=False, cycle=True)
pipe = index_pipe(src, lambda x: x[lo0:, :hi1])
pipe.build()
j = 0
k = 0
for _ in range(4):
inp, cpu, gpu = pipe.run()
for i in range(len(inp)):
x = inp.at(i)
# fmt: off
ref = x[lo_idxs[j]:, :hi_idxs[k]]
# fmt: on
j = (j + 1) % len(lo_idxs)
k = (k + 1) % len(hi_idxs)
assert np.array_equal(ref, cpu.at(i))
assert np.array_equal(ref, gpu.as_cpu().at(i))
def test_runtime_stride_dim1():
def data_gen():
return [
np.arange(12, dtype=np.float32).reshape(4, 3),
np.arange(20, dtype=np.float32).reshape(4, 5),
]
src = fn.external_source(data_gen)
strides = [np.array(x, dtype=np.int64) for x in [1, 2, -1, -2, -5]]
stride = fn.external_source(source=strides, batch=False, cycle=True)
pipe = index_pipe(src, lambda x: x[::stride])
pipe.build()
j = 0
for _ in range(4):
inp, cpu, gpu = pipe.run()
for i in range(len(inp)):
x = inp.at(i)
# fmt: off
ref = x[::strides[j]]
# fmt: on
assert np.array_equal(ref, cpu.at(i))
assert np.array_equal(ref, gpu.as_cpu().at(i))
j = (j + 1) % len(strides)
def test_runtime_stride_dim2():
def data_gen():
return [
np.arange(12, dtype=np.float32).reshape(4, 3),
np.arange(20, dtype=np.float32).reshape(4, 5),
]
src = fn.external_source(data_gen)
strides = [np.array(x, dtype=np.int64) for x in [1, 2, -1, -2, -5]]
stride = fn.external_source(source=strides, batch=False, cycle=True)
pipe = index_pipe(src, lambda x: x[:, ::stride])
pipe.build()
j = 0
for _ in range(4):
inp, cpu, gpu = pipe.run()
for i in range(len(inp)):
x = inp.at(i)
# fmt: off
ref = x[:, ::strides[j]]
# fmt: on
assert np.array_equal(ref, cpu.at(i))
assert np.array_equal(ref, gpu.as_cpu().at(i))
j = (j + 1) % len(strides)
def test_new_axis():
def data_gen():
return [
np.float32([[0, 1, 2], [3, 4, 5]]),
np.float32([[0, 1], [2, 3], [4, 5]]),
]
# fmt: off
yield (_test_indexing, data_gen, "AB", "",
lambda x: x[1:, dali.newaxis, :2],
lambda x: x[1:, np.newaxis, :2])
yield (_test_indexing, data_gen, "AB", "CAB",
lambda x: x[dali.newaxis("C"), -1:, :-2],
lambda x: x[np.newaxis, -1:, :-2])
yield (_test_indexing, data_gen, "AB", "ACB",
lambda x: x[:, dali.newaxis("C"), :],
lambda x: x[:, np.newaxis, :])
yield (_test_indexing, data_gen, "AB", "C",
lambda x: x[1, dali.newaxis("C"), 1],
lambda x: x[1, np.newaxis, 1])
# fmt: on
def _test_invalid_args(device, args, message, run):
data = [np.uint8([[1, 2, 3]]), np.uint8([[1, 2]])]
pipe = Pipeline(2, 1, 0)
src = fn.external_source(lambda: data, device=device)
pipe.set_outputs(fn.tensor_subscript(src, **args))
with assert_raises(RuntimeError, glob=message):
pipe.build()
if run:
pipe.run()
def test_inconsistent_args():
for device in ["cpu", "gpu"]:
for args, message in [
({"lo_0": 0, "at_0": 0}, "both as an index"),
({"at_0": 0, "step_0": 1}, "cannot have a step"),
]:
yield _test_invalid_args, device, args, message, False
def _test_out_of_range(device, idx):
data = [np.uint8([1, 2, 3]), np.uint8([1, 2])]
src = fn.external_source(lambda: data, device=device)
pipe = index_pipe(src, lambda x: x[idx])
pipe.build()
with assert_raises(RuntimeError, glob="out of range"):
_ = pipe.run()
def test_out_of_range():
for device in ["cpu", "gpu"]:
for idx in [-3, 2]:
yield _test_out_of_range, device, idx
def _test_too_many_indices(device):
data = [np.uint8([1, 2, 3]), np.uint8([1, 2])]
src = fn.external_source(lambda: data, device=device)
pipe = index_pipe(src, lambda x: x[1, :])
# Verified by tensor_subscript
with assert_raises(RuntimeError, glob="Too many indices"):
pipe.build()
_ = pipe.run()
# Verified by subscript_dim_check
pipe = index_pipe(src, lambda x: x[:, :])
with assert_raises(RuntimeError, glob="Too many indices"):
pipe.build()
_ = pipe.run()
# Verified by expand_dims
pipe = index_pipe(src, lambda x: x[:, :, dali.newaxis])
with assert_raises(RuntimeError, glob="not enough dimensions"):
pipe.build()
_ = pipe.run()
# Verified by subscript_dim_check
pipe = index_pipe(src, lambda x: x[dali.newaxis, :, dali.newaxis, :])
with assert_raises(RuntimeError, glob="Too many indices"):
pipe.build()
_ = pipe.run()
def test_zero_stride_error():
data = [np.uint8([1, 2, 3]), np.uint8([1, 2])]
src = fn.external_source(lambda: data)
pipe = index_pipe(src, lambda x: x[::0])
with assert_raises(RuntimeError, glob="Step cannot be zero"):
pipe.build()
_ = pipe.run()
def test_too_many_indices():
for device in ["cpu", "gpu"]:
yield _test_too_many_indices, device
def test_ellipsis_not_implemented():
data = [np.uint8([1, 2, 3]), np.uint8([1, 2])]
src = fn.external_source(lambda: data)
with assert_raises(NotImplementedError):
_ = src[..., :1]
def test_multiple_skipped_dims():
data = [np.arange(64, dtype=np.float32).reshape(4, 2, 2, 4),
np.arange(120, dtype=np.float32).reshape(4, 2, 3, 5)]
src = fn.external_source(lambda: data, layout="ABCD")
pipe = index_pipe(src, lambda x: x[1, :, :, 1])
pipe.build()
inp, cpu, gpu = pipe.run()
for i in range(len(inp)):
x = inp.at(i)
assert np.array_equal(x[1, :, :, 1], cpu.at(i))
assert np.array_equal(x[1, :, :, 1], gpu.as_cpu().at(i))
DALI-main | dali/test/python/operator_2/test_subscript.py
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import math
import os
import random
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
from test_utils import compare_pipelines
from sequences_test_utils import (ArgData, ArgDesc, ArgCb, ParamsProvider, get_video_input_cases,
sequence_suite_helper)
test_data_root = os.environ['DALI_EXTRA_PATH']
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
def get_output_size(angle, input_size, parity_correction=True):
cosa = abs(math.cos(angle))
sina = abs(math.sin(angle))
(h, w) = input_size[0:2]
eps = 1e-2
out_w = int(math.ceil(w * cosa + h * sina - eps))
out_h = int(math.ceil(h * cosa + w * sina - eps))
if not parity_correction:
return (out_h, out_w)
if sina <= cosa:
if out_w % 2 != w % 2:
out_w += 1
if out_h % 2 != h % 2:
out_h += 1
else:
if out_w % 2 != h % 2:
out_w += 1
if out_h % 2 != w % 2:
out_h += 1
return (out_h, out_w)
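# Example for get_output_size: a 100x200 (h, w) input rotated by pi/2 gives
# |cos| ~ 0, |sin| = 1, so out_w = ceil(200*0 + 100*1 - eps) = 100 and
# out_h = 200 -- the axes swap; the parity correction then keeps each output
# extent's parity equal to that of the input extent it is derived from.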
def get_3d_lin_rotation(angle, axis):
# mirrors transform.h:rotation3D
if not angle:
        return np.eye(3, dtype=np.float32)  # identity; np.eye takes an int, not a shape tuple
axis_norm = np.linalg.norm(axis)
axis = [dim / axis_norm for dim in axis]
u, v, w = axis
cosa = math.cos(angle)
sina = math.sin(angle)
return np.array([
[u * u + (v * v + w * w) * cosa, u * v * (1 - cosa) - w * sina, u * w * (1 - cosa) + v * sina], # noqa:E501
[u * v * (1 - cosa) + w * sina, v * v + (u * u + w * w) * cosa, v * w * (1 - cosa) - u * sina], # noqa:E501
[u * w * (1 - cosa) - v * sina, v * w * (1 - cosa) + u * sina, w * w + (u * u + v * v) * cosa], # noqa:E501
], dtype=np.float32)
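# (the matrix above is the standard axis-angle / Rodrigues rotation matrix; as a
# sketch, the compact form for a unit axis u and angle a would be:
#   K = np.array([[0, -u[2], u[1]], [u[2], 0, -u[0]], [-u[1], u[0], 0]])
#   R = cos(a) * np.eye(3) + sin(a) * K + (1 - cos(a)) * np.outer(u, u)
# which expands entry-by-entry to the array returned above)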
def get_3d_output_size(angle, axis, input_size, parity_correction=False):
transform = np.abs(get_3d_lin_rotation(angle, axis))
eps = 1e-2
in_size = np.array(input_size[2::-1], dtype=np.int32)
out_size = np.int32(np.ceil(np.matmul(transform, in_size) - eps))
if parity_correction:
dominant_axis = np.argmax(transform, axis=1)
out_size += (out_size % 2) ^ (in_size[dominant_axis] % 2)
return out_size[::-1]
def get_transform(angle, input_size, output_size):
cosa = math.cos(angle)
sina = math.sin(angle)
(out_h, out_w) = output_size[0:2]
(in_h, in_w) = input_size[0:2]
t1 = np.array([
[1, 0, -out_w * 0.5],
[0, 1, -out_h * 0.5],
[0, 0, 1]])
r = np.array([
[cosa, -sina, 0],
[sina, cosa, 0],
[0, 0, 1]])
t2 = np.array([
[1, 0, in_w * 0.5],
[0, 1, in_h * 0.5],
[0, 0, 1]])
return (np.matmul(t2, np.matmul(r, t1)))[0:2, 0:3]
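# (get_transform composes: t1 moves the output center to the origin, r rotates,
# and t2 moves the origin to the input center; the top 2x3 rows form the affine
# matrix that CVRotate below passes to warpAffine with WARP_INVERSE_MAP)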
def ToCVMatrix(matrix):
offset = np.matmul(matrix, np.array([[0.5], [0.5], [1]]))
result = matrix.copy()
result[0][2] = offset[0] - 0.5
result[1][2] = offset[1] - 0.5
return result
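# (ToCVMatrix re-bases the transform from half-integer pixel-center coordinates
# to OpenCV's integer pixel-center convention: it maps the center of pixel
# (0, 0), i.e. (0.5, 0.5), and folds the result back into the translation)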
def CVRotate(output_type, input_type, fixed_size):
def warp_fn(img, angle):
in_size = img.shape[0:2]
angle = math.radians(angle)
out_size = fixed_size if fixed_size is not None else get_output_size(angle, in_size)
matrix = get_transform(angle, in_size, out_size)
matrix = ToCVMatrix(matrix)
if output_type == dali.types.FLOAT or input_type == dali.types.FLOAT:
img = np.float32(img)
out_size_wh = (out_size[1], out_size[0])
out = cv2.warpAffine(img,
matrix,
out_size_wh,
borderMode=cv2.BORDER_CONSTANT,
borderValue=[42, 42, 42],
flags=(cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP))
if output_type == dali.types.UINT8 and input_type == dali.types.FLOAT:
out = np.uint8(np.clip(out, 0, 255))
return out
return warp_fn
class RotatePipeline(Pipeline):
def __init__(self,
device,
batch_size,
output_type,
input_type,
fixed_size=None,
num_threads=3,
device_id=0,
num_gpus=1):
super(RotatePipeline, self).__init__(batch_size,
num_threads,
device_id,
seed=7865,
exec_async=False,
exec_pipelined=False)
self.name = device
self.input = ops.readers.Caffe(path=caffe_db_folder,
shard_id=device_id,
num_shards=num_gpus)
self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
if input_type != dali.types.UINT8:
self.cast = ops.Cast(device=device, dtype=input_type)
else:
self.cast = None
self.uniform = ops.random.Uniform(range=(-180.0, 180.0), seed=42)
self.rotate = ops.Rotate(device=device, size=fixed_size, fill_value=42, dtype=output_type)
def define_graph(self):
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
if self.rotate.device == "gpu":
images = images.gpu()
if self.cast:
images = self.cast(images)
outputs = self.rotate(images, angle=self.uniform())
return outputs
class CVPipeline(Pipeline):
def __init__(self,
batch_size,
output_type,
input_type,
fixed_size,
num_threads=3,
device_id=0,
num_gpus=1):
super(CVPipeline, self).__init__(batch_size,
num_threads,
device_id,
seed=7865,
exec_async=False,
exec_pipelined=False)
self.name = "cv"
self.input = ops.readers.Caffe(path=caffe_db_folder,
shard_id=device_id,
num_shards=num_gpus)
self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
self.rotate = ops.PythonFunction(function=CVRotate(output_type, input_type, fixed_size),
output_layouts="HWC")
self.uniform = ops.random.Uniform(range=(-180.0, 180.0), seed=42)
self.iter = 0
def define_graph(self):
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
angles = self.uniform()
outputs = self.rotate(images, angles)
return outputs
def compare(pipe1, pipe2, eps):
pipe1.build()
pipe2.build()
epoch_size = pipe1.epoch_size("Reader")
batch_size = pipe1.max_batch_size
niter = 1 if batch_size >= epoch_size else 2
compare_pipelines(pipe1, pipe2, batch_size, niter, eps)
io_types = [
(dali.types.UINT8, dali.types.UINT8),
(dali.types.UINT8, dali.types.FLOAT),
(dali.types.FLOAT, dali.types.UINT8),
(dali.types.FLOAT, dali.types.FLOAT)
]
def create_pipeline(backend, *args):
if backend == "cv":
return CVPipeline(*args)
else:
return RotatePipeline(backend, *args)
def run_cases(backend1, backend2, epsilon):
for output_size in [None, (160, 240)]:
for (itype, otype) in io_types:
batch_size = np.random.choice([1, 4, 19])
def run_case(backend1, backend2, *args):
pipe1 = create_pipeline(backend1, *args)
pipe2 = create_pipeline(backend2, *args)
compare(pipe1, pipe2, epsilon)
yield run_case, backend1, backend2, batch_size, otype, itype, output_size
def test_gpu_vs_cv():
for test in run_cases("gpu", "cv", 8):
yield test
def test_cpu_vs_cv():
for test in run_cases("cpu", "cv", 8):
yield test
def test_gpu_vs_cpu():
for test in run_cases("gpu", "cpu", 1):
yield test
def infer_sequence_size(input_shapes, angles, axes=None):
assert len(input_shapes) == len(angles)
assert axes is None or len(axes) == len(angles)
if axes is None:
no_correction_shapes = [
np.array(get_output_size(math.radians(angle), shape, False), dtype=np.int32)
for shape, angle in zip(input_shapes, angles)
]
corrected_shapes = [
np.array(get_output_size(math.radians(angle), shape, True), dtype=np.int32)
for shape, angle in zip(input_shapes, angles)
]
else:
no_correction_shapes = [
np.array(get_3d_output_size(math.radians(angle), axis, shape, False), dtype=np.int32)
for shape, angle, axis in zip(input_shapes, angles, axes)
]
corrected_shapes = [
np.array(get_3d_output_size(math.radians(angle), axis, shape, True), dtype=np.int32)
for shape, angle, axis in zip(input_shapes, angles, axes)
]
max_shape = np.max(no_correction_shapes, axis=0)
parity = np.sum(np.array(corrected_shapes, dtype=np.int32) % 2, axis=0)
for i in range(len(max_shape)):
if max_shape[i] % 2 != (2 * parity[i] > len(input_shapes)):
max_shape[i] += 1
return max_shape
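# (infer_sequence_size takes the per-frame maximum of the uncorrected output
# shapes, then bumps an extent by 1 whenever its parity disagrees with the
# majority parity of the individually corrected per-frame shapes)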
def sequence_batch_output_size(unfolded_extents, input_batch, angle_batch, axis_batch=None):
def iter_by_groups():
assert sum(unfolded_extents) == len(input_batch)
assert len(input_batch) == len(angle_batch)
assert axis_batch is None or len(axis_batch) == len(angle_batch)
offset = 0
for group in unfolded_extents:
yield input_batch[offset:offset + group], angle_batch[offset:offset + group], \
None if axis_batch is None else axis_batch[offset:offset + group]
offset += group
sequence_output_shape = [
infer_sequence_size([frame.shape for frame in input_frames], angles, axes)
for input_frames, angles, axes in iter_by_groups()
]
return [
output_shape for output_shape, num_frames in zip(sequence_output_shape, unfolded_extents)
for _ in range(num_frames)
]
class RotatePerFrameParamsProvider(ParamsProvider):
"""
    Provides a per-frame ``angle`` argument input to the video rotate operator test.
    The expanded baseline pipeline must additionally be provided with a ``size``
    argument to account for the coalescing of inferred frame sizes.
"""
def __init__(self, input_params):
super().__init__(input_params)
def expand_params(self):
assert self.input_data.desc.expandable_prefix == "F"
expanded_params = super().expand_params()
params_dict = {param_data.desc.name: param_data for param_data in expanded_params}
expanded_angles = params_dict.get('angle')
expanded_axis = params_dict.get('axis')
assert expanded_angles is not None and 'size' not in self.fixed_params and \
'size' not in params_dict
sequence_extents = [
[sample.shape[0] for sample in input_batch]
for input_batch in self.input_data.data]
output_size_params = (sequence_extents, self.unfolded_input.data, expanded_angles.data)
if expanded_axis is not None:
output_size_params += (expanded_axis.data,)
output_sizes = [
sequence_batch_output_size(*args)
for args in zip(*output_size_params)]
expanded_params.append(ArgData(ArgDesc("size", "", "cpu"), output_sizes))
return expanded_params
def __repr__(self):
return "{}({})".format(repr(self.__class__), repr(self.input_params))
def test_video():
def small_angle(sample_desc):
return np.array(sample_desc.rng.uniform(-44., 44.), dtype=np.float32)
def random_angle(sample_desc):
return np.array(sample_desc.rng.uniform(-180., 180.), dtype=np.float32)
def random_output(sample_desc):
        return np.array([sample_desc.rng.randint(300, 400), sample_desc.rng.randint(300, 400)])
video_test_cases = [
(dali.fn.rotate, {'angle': 45.}, []),
(dali.fn.rotate, {}, [ArgCb("angle", small_angle, False)]),
(dali.fn.rotate, {}, [ArgCb("angle", random_angle, False)]),
(dali.fn.rotate, {}, RotatePerFrameParamsProvider([ArgCb("angle", small_angle, True)])),
(dali.fn.rotate, {}, RotatePerFrameParamsProvider([ArgCb("angle", random_angle, True)])),
(dali.fn.rotate, {}, [ArgCb("angle", small_angle, True),
ArgCb("size", random_output, False)]),
]
rng = random.Random(42)
video_cases = get_video_input_cases("FHWC", rng, larger_shape=(512, 287))
input_cases = [
ArgData(ArgDesc(0, "F", "", "FHWC"), input_data)
for input_data in video_cases
]
yield from sequence_suite_helper(rng, input_cases, video_test_cases)
def test_3d_sequence():
rng = random.Random(42)
num_batches = 4
max_batch_size = 8
max_frames_num = 32
input_layout = "FDHWC"
np_rng = np.random.default_rng(42)
def get_random_sample():
num_frames = rng.randint(1, max_frames_num)
d, h, w = tuple(rng.randint(10, 50) for _ in range(3))
return np.int32(np_rng.uniform(0, 255, (num_frames, d, h, w, 3)))
def get_random_batch():
return [get_random_sample() for _ in range(rng.randint(1, max_batch_size))]
input_cases = [
ArgData(desc=ArgDesc(0, "F", "", input_layout),
data=[get_random_batch() for _ in range(num_batches)])
]
def random_angle(sample_desc):
return np.array(sample_desc.rng.uniform(-180., 180.), dtype=np.float32)
def random_axis(sample_desc):
return np.array([sample_desc.rng.uniform(-1, 1) for _ in range(3)], dtype=np.float32)
test_cases = [
(dali.fn.rotate, {'angle': 45., 'axis': np.array([1, 0, 0], dtype=np.float32)}, []),
(dali.fn.rotate, {'size': (50, 30, 20)}, [ArgCb("angle", random_angle, True),
ArgCb("axis", random_axis, True)]),
(dali.fn.rotate, {}, RotatePerFrameParamsProvider([ArgCb("angle", random_angle, True),
ArgCb("axis", random_axis, True)])),
]
yield from sequence_suite_helper(rng, input_cases, test_cases)
DALI-main | dali/test/python/operator_2/test_rotate.py
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali.types as types
from nvidia.dali import pipeline_def, fn
from nose2.tools import params
import numpy as np
from test_utils import as_array
devices = ('cpu', 'gpu')
data_1x1x2x2 = np.array(
[[[[1, 2],
[3, 4]]]], dtype=np.float32)
data_1x1x2x4 = np.array(
[[[[1, 2, 3, 4],
[5, 6, 7, 8]]]], dtype=np.float32)
data_1x1x4x4 = np.array(
[[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]]]], dtype=np.float32)
def run_and_compare(expected, data, device, resize_fn):
@pipeline_def(batch_size=1, num_threads=3, device_id=0)
def pipe():
input_data = types.Constant(data, device=device)
return resize_fn(input_data)
p = pipe()
p.build()
out = p.run()
np.testing.assert_allclose(expected, as_array(out[0][0]), rtol=1e-3, atol=1e-7)
@params('cpu', 'gpu')
def test_resize_upsample_scales_nearest(device):
data = data_1x1x2x2
scales = np.array([1.0, 1.0, 2.0, 3.0], dtype=np.float32)
# from onnx.backend.test.case.node.resize import interpolate_nd, nearest_coeffs
# expected = interpolate_nd(data, lambda x, _: nearest_coeffs(x), scale_factors=scales)
expected = np.array(
[[[[1., 1., 1., 2., 2., 2.],
[1., 1., 1., 2., 2., 2.],
[3., 3., 3., 4., 4., 4.],
[3., 3., 3., 4., 4., 4.]]]], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(input_data, scales=scales, alignment=0,
interp_type=types.INTERP_NN)
run_and_compare(expected, data, device, resize_fn)
@params('cpu', 'gpu')
def test_resize_downsample_scales_nearest(device):
data = data_1x1x2x4
scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
# from onnx.backend.test.case.node.resize import interpolate_nd, nearest_coeffs
# expected = interpolate_nd(data, lambda x, _: nearest_coeffs(x), scale_factors=scales)
expected = np.array(
[[[[1., 3.]]]], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(input_data, scales=scales, alignment=0,
interp_type=types.INTERP_NN)
run_and_compare(expected, data, device, resize_fn)
@params('cpu', 'gpu')
def test_resize_upsample_sizes_nearest(device):
data = data_1x1x2x2
sizes = np.array([1., 1., 7., 8.], dtype=np.float32)
# from onnx.backend.test.case.node.resize import interpolate_nd, nearest_coeffs
# expected = interpolate_nd(data, lambda x, _: nearest_coeffs(x, mode='round_prefer_ceil'),
# output_size=sizes.astype(np.int64))
expected = np.array(
[[[[1., 1., 1., 1., 2., 2., 2., 2.],
[1., 1., 1., 1., 2., 2., 2., 2.],
[1., 1., 1., 1., 2., 2., 2., 2.],
[3., 3., 3., 3., 4., 4., 4., 4.],
[3., 3., 3., 3., 4., 4., 4., 4.],
[3., 3., 3., 3., 4., 4., 4., 4.],
[3., 3., 3., 3., 4., 4., 4., 4.]]]], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(input_data, sizes=sizes, alignment=0,
interp_type=types.INTERP_NN)
run_and_compare(expected, data, device, resize_fn)
@params('cpu', 'gpu')
def test_resize_upsample_scales_linear(device):
data = data_1x1x2x2
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
# from onnx.backend.test.case.node.resize import interpolate_nd, linear_coeffs
# expected = interpolate_nd(data, lambda x, _: linear_coeffs(x),
# scale_factors=scales)
expected = np.array(
[[[[1., 1.25, 1.75, 2.],
[1.5, 1.75, 2.25, 2.5],
[2.5, 2.75, 3.25, 3.5],
[3., 3.25, 3.75, 4.]]]], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(input_data, scales=scales, alignment=0,
interp_type=types.INTERP_LINEAR,
antialias=False)
run_and_compare(expected, data, device, resize_fn)
@params('cpu', 'gpu')
def test_resize_downsample_scales_linear(device):
data = data_1x1x2x4
scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
# from onnx.backend.test.case.node.resize import interpolate_nd, linear_coeffs
# expected = interpolate_nd(data, lambda x, _: linear_coeffs(x), scale_factors=scales)
expected = np.array(
[[[[2.6666665, 4.3333331]]]], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(input_data, scales=scales, alignment=0,
interp_type=types.INTERP_LINEAR,
antialias=False)
run_and_compare(expected, data, device, resize_fn)
@params('cpu', 'gpu')
def test_resize_alignment(device):
data = data_1x1x2x4
scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
def resize_fn(input_data, alignment=0):
return fn.experimental.tensor_resize(
input_data, scales=scales, alignment=alignment,
interp_type=types.INTERP_LINEAR, antialias=False)
run_and_compare(np.array([[[[2.6666665, 4.3333331]]]], dtype=np.float32), data, device,
lambda in_data: resize_fn(in_data, alignment=0))
run_and_compare(np.array([[[[3.6666665, 5.3333331]]]], dtype=np.float32), data, device,
lambda in_data: resize_fn(in_data, alignment=0.5))
run_and_compare(np.array([[[[4.6666665, 6.3333331]]]], dtype=np.float32), data, device,
lambda in_data: resize_fn(in_data, alignment=1))
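# (alignment anchors the output sampling grid within the input: roughly, 0
# aligns it to the start, 0.5 centers it, and 1 aligns it to the end; sliding
# the window across this ramp-like input shifts every output by a constant,
# as the three expected values above show)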
@params('cpu', 'gpu')
def test_resize_upsample_scales_cubic(device):
data = data_1x1x4x4
scales = np.array([1.0, 1.0, 2.0, 2.0], dtype=np.float32)
# from onnx.backend.test.case.node.resize import interpolate_nd, cubic_coeffs
# expected = interpolate_nd(data, lambda x, _: cubic_coeffs(x, A=-0.5), scale_factors=scales)
expected = np.array(
[[[[0.6484375, 0.8984375, 1.4453125, 1.96875, 2.46875,
2.9921875, 3.5390625, 3.7890625],
[1.6484375, 1.8984375, 2.4453125, 2.96875, 3.46875,
3.9921875, 4.5390625, 4.7890625],
[3.8359375, 4.0859375, 4.6328125, 5.15625, 5.65625,
6.1796875, 6.7265625, 6.9765625],
[5.9296875, 6.1796875, 6.7265625, 7.25, 7.75,
8.2734375, 8.8203125, 9.0703125],
[7.9296875, 8.1796875, 8.7265625, 9.25, 9.75,
10.2734375, 10.8203125, 11.0703125],
[10.0234375, 10.2734375, 10.8203125, 11.34375, 11.84375,
12.3671875, 12.9140625, 13.1640625],
[12.2109375, 12.4609375, 13.0078125, 13.53125, 14.03125,
14.5546875, 15.1015625, 15.3515625],
[13.2109375, 13.4609375, 14.0078125, 14.53125, 15.03125,
15.5546875, 16.1015625, 16.3515625]]]], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(
input_data, scales=scales, alignment=0,
interp_type=types.INTERP_CUBIC, antialias=False)
run_and_compare(expected, data, device, resize_fn)
@params('cpu', 'gpu')
def test_resize_downsample_scales_cubic(device):
data = data_1x1x4x4
scales = np.array([1.0, 1.0, 0.8, 0.8], dtype=np.float32)
# from onnx.backend.test.case.node.resize import interpolate_nd, cubic_coeffs
# expected = interpolate_nd(data, lambda x, _: cubic_coeffs(x, A=-0.5), scale_factors=scales)
expected = np.array(
[[[[1.38574215, 2.68359369, 4.00683586],
[6.57714832, 7.87499986, 9.19824203],
[11.87011699, 13.16796853, 14.4912107]]]], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(
input_data, scales=scales, alignment=0,
interp_type=types.INTERP_CUBIC, antialias=False)
run_and_compare(expected, data, device, resize_fn)
@params('cpu', 'gpu')
def test_resize_upsample_sizes_cubic(device):
data = data_1x1x4x4
sizes = np.array([1., 1., 9., 10.], dtype=np.float32)
# from onnx.backend.test.case.node.resize import interpolate_nd, cubic_coeffs
# expected = interpolate_nd(data, lambda x, _: cubic_coeffs(x, A=-0.5),
# output_size=sizes.astype(np.int64))
expected = np.array(
[[[[0.63671948, 0.76971948, 1.14771948, 1.60571948, 2.01021948,
2.41021948, 2.81471948, 3.27271948, 3.65071948, 3.78371948],
[1.36168519, 1.49468519, 1.87268519, 2.33068519, 2.73518519,
3.13518519, 3.53968519, 3.99768519, 4.37568519, 4.50868519],
[3.18610219, 3.31910219, 3.69710219, 4.15510219, 4.55960219,
4.95960219, 5.36410219, 5.82210219, 6.20010219, 6.33310219],
[5.14872222, 5.28172222, 5.65972222, 6.11772222, 6.52222222,
6.92222222, 7.32672222, 7.78472222, 8.16272222, 8.29572222],
[6.9265, 7.0595, 7.4375, 7.8955, 8.3,
8.7, 9.1045, 9.5625, 9.9405, 10.0735],
[8.70427778, 8.83727778, 9.21527778, 9.67327778, 10.07777778,
10.47777778, 10.88227778, 11.34027778, 11.71827778, 11.85127778],
[10.66689781, 10.79989781, 11.17789781, 11.63589781, 12.04039781,
12.44039781, 12.84489781, 13.30289781, 13.68089781, 13.81389781],
[12.49131481, 12.62431481, 13.00231481, 13.46031481, 13.86481481,
14.26481481, 14.66931481, 15.12731481, 15.50531481, 15.63831481],
[13.21628052, 13.34928052, 13.72728052, 14.18528052, 14.58978052,
14.98978052, 15.39428052, 15.85228052, 16.23028052, 16.36328052]]]], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(
input_data, sizes=sizes, alignment=0,
interp_type=types.INTERP_CUBIC, antialias=False)
run_and_compare(expected, data, device, resize_fn)
@params('cpu', 'gpu')
def test_resize_downsample_sizes_cubic(device):
data = data_1x1x4x4
sizes = np.array([1., 1., 3., 3.], dtype=np.float32)
# from onnx.backend.test.case.node.resize import interpolate_nd, cubic_coeffs
# expected = interpolate_nd(data, lambda x, _: cubic_coeffs(x, A=-0.5),
# output_size=sizes.astype(np.int64))
expected = np.array(
[[[[1.54398148, 2.93518519, 4.32638889],
[7.1087963, 8.5, 9.8912037],
[12.67361111, 14.06481481, 15.45601852]]]], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(input_data, sizes=sizes, alignment=0,
interp_type=types.INTERP_CUBIC, antialias=False)
run_and_compare(expected, data, device, resize_fn)
@params('cpu', 'gpu')
def test_resize_upsample_resize_only_1d(device):
data = data_1x1x2x2
scales = np.array([1.0, 1.0, 1.0, 3.0], dtype=np.float32)
expected = np.array(
[[[[1., 1., 1., 2., 2., 2.],
[3., 3., 3., 4., 4., 4.]]]], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(input_data, scales=scales, alignment=0,
interp_type=types.INTERP_NN)
run_and_compare(expected, data, device, resize_fn)
@params('cpu', 'gpu')
def test_resize_upsample_resize_only_noop(device):
data = data_1x1x2x2
scales = np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(input_data, scales=scales, alignment=0,
interp_type=types.INTERP_NN)
run_and_compare(data, data, device, resize_fn)
@params('cpu', 'gpu')
def test_resize_upsample_1d(device):
data = np.array([1.0, 2.0], dtype=np.float32)
scales = np.array([3.0], dtype=np.float32)
expected = np.array([1., 1., 1., 2., 2., 2.], dtype=np.float32)
def resize_fn(input_data):
return fn.experimental.tensor_resize(input_data, scales=scales, alignment=0,
interp_type=types.INTERP_NN)
run_and_compare(expected, data, device, resize_fn)
DALI-main | dali/test/python/operator_2/test_tensor_resize.py
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import numpy as np
from numpy.testing import assert_array_equal
import os
from nose_utils import assert_raises
from nose2.tools import params
test_data_root = os.environ['DALI_EXTRA_PATH']
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
class ReshapePipeline(Pipeline):
def __init__(self, device, batch_size, relative, use_wildcard,
num_threads=3, device_id=0, num_gpus=1):
super(ReshapePipeline, self).__init__(batch_size, num_threads, device_id, seed=7865,
exec_async=True, exec_pipelined=True)
self.device = device
self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
num_shards=num_gpus)
self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
W = 320
H = 224
self.resize = ops.Resize(device="cpu", resize_x=W, resize_y=H)
WC = -1 if use_wildcard else W * 3
if relative:
rel_shape = (-1, 3) if use_wildcard else (1, 3)
self.reshape = ops.Reshape(device=device, rel_shape=rel_shape, layout="ab")
else:
self.reshape = ops.Reshape(device=device, shape=(H, WC), layout="ab")
def define_graph(self):
jpegs, labels = self.input(name="Reader")
images = self.resize(self.decode(jpegs))
if self.device == "gpu":
images = images.gpu()
reshaped = self.reshape(images)
# `images+0` creates a (no-op) arithmetic expression node - this prevents the
# original `images` node from being marked as pipeline output
return [images + 0, reshaped]
def CollapseChannels(image):
new_shape = np.array([image.shape[0], image.shape[1] * image.shape[2]]).astype(np.int32)
return new_shape
def CollapseChannelsWildcard(image):
new_shape = np.array([image.shape[0], -1]).astype(np.int32)
return new_shape
class ReshapeWithInput(Pipeline):
def __init__(self, device, batch_size, use_wildcard, num_threads=3, device_id=0, num_gpus=1):
super(ReshapeWithInput, self).__init__(batch_size, num_threads, device_id, seed=7865,
exec_async=False, exec_pipelined=False)
self.device = device
self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
num_shards=num_gpus)
self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
fn = CollapseChannelsWildcard if use_wildcard else CollapseChannels
self.gen_shapes = ops.PythonFunction(function=fn)
self.reshape = ops.Reshape(device=device, layout="ab")
def define_graph(self):
jpegs, labels = self.input(name="Reader")
images_cpu = self.decode(jpegs)
shapes = self.gen_shapes(images_cpu)
images = images_cpu.gpu() if self.device == "gpu" else images_cpu
reshaped = self.reshape(images, shapes)
return [images, reshaped]
def MakeTallFunc(relative, wildcard):
def func(image):
if relative:
return np.array([-1 if wildcard else 2, 0.5, 1]).astype(np.float32)
else:
h, w, c = image.shape
return np.array([-1 if wildcard else 2 * h, w / 2, c]).astype(np.int32)
return func
class ReshapeWithArgInput(Pipeline):
def __init__(self, device, batch_size, relative, use_wildcard,
num_threads=3, device_id=0, num_gpus=1):
super(ReshapeWithArgInput, self).__init__(batch_size, num_threads, device_id, seed=7865,
exec_async=False, exec_pipelined=False)
self.device = device
self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
num_shards=num_gpus)
self.resize = ops.Resize(device="cpu")
self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
self.gen_shapes = ops.PythonFunction(function=MakeTallFunc(relative, use_wildcard))
self.reshape = ops.Reshape(device=device)
self.relative = relative
def define_graph(self):
jpegs, labels = self.input(name="Reader")
images_cpu = self.decode(jpegs)
rng = ops.random.Uniform(range=[100, 128])
cast = ops.Cast(dtype=types.INT32)
widths = cast(rng()) * 2.0
heights = cast(rng()) * 2.0
images_cpu = self.resize(images_cpu, resize_x=widths, resize_y=heights)
shapes = self.gen_shapes(images_cpu)
images = images_cpu.gpu() if self.device == "gpu" else images_cpu
if self.relative:
reshaped = self.reshape(images, rel_shape=shapes)
else:
reshaped = self.reshape(images, shape=shapes)
return [images, reshaped]
def verify_tensor_layouts(imgs, reshaped):
assert imgs.layout() == "HWC"
assert reshaped.layout() == "ab"
for i in range(len(imgs)):
assert imgs[i].layout() == "HWC"
assert reshaped[i].layout() == "ab"
def verify_flatten(imgs, reshaped, src_shape=None):
assert imgs.layout() == "HWC"
assert reshaped.layout() == "ab"
for i in range(len(imgs)):
if src_shape is not None:
assert imgs.at(i).shape == src_shape
img_shape = imgs.at(i).shape
# collapse width and channels
ref_shape = (img_shape[0], img_shape[1] * img_shape[2])
assert reshaped.at(i).shape == ref_shape
assert_array_equal(imgs.at(i).flatten(), reshaped.at(i).flatten())
def verify_make_tall(imgs, reshaped, src_shape=None):
assert imgs.layout() == "HWC"
assert reshaped.layout() == "HWC"
for i in range(len(imgs)):
if src_shape is not None:
assert imgs.at(i).shape == src_shape
img_shape = imgs.at(i).shape
        # make tall: double the height, halve the width, keep the channels
ref_shape = (img_shape[0] * 2, img_shape[1] // 2, 3)
assert reshaped.at(i).shape == ref_shape
assert_array_equal(imgs.at(i).flatten(), reshaped.at(i).flatten())
def check_reshape(device, batch_size, relative, use_wildcard):
pipe = ReshapePipeline(device, batch_size, relative, use_wildcard)
pipe.build()
for iter in range(10):
imgs, reshaped = pipe.run()
if device == "gpu":
verify_tensor_layouts(imgs, reshaped)
imgs = imgs.as_cpu()
reshaped = reshaped.as_cpu()
verify_flatten(imgs, reshaped, (224, 320, 3))
def check_reshape_with_input(device, batch_size, use_wildcard):
pipe = ReshapeWithInput(device, batch_size, use_wildcard)
pipe.build()
for iter in range(2):
imgs, reshaped = pipe.run()
if device == "gpu":
verify_tensor_layouts(imgs, reshaped)
imgs = imgs.as_cpu()
reshaped = reshaped.as_cpu()
verify_flatten(imgs, reshaped)
def check_reshape_with_arg_input(device, batch_size, relative, use_wildcard):
pipe = ReshapeWithArgInput(device, batch_size, relative, use_wildcard)
pipe.build()
for iter in range(2):
imgs, reshaped = pipe.run()
if device == "gpu":
imgs = imgs.as_cpu()
reshaped = reshaped.as_cpu()
verify_make_tall(imgs, reshaped)
def test_reshape_arg():
for device in ["cpu", "gpu"]:
for batch_size in [16]:
for relative in [False, True]:
for use_wildcard in [False, True]:
yield check_reshape, device, batch_size, relative, use_wildcard
def test_reshape_input():
for device in ["cpu", "gpu"]:
for batch_size in [16]:
for use_wildcard in [False, True]:
yield check_reshape_with_input, device, batch_size, use_wildcard
def test_reshape_arg_input():
for device in ["cpu", "gpu"]:
for batch_size in [16]:
for relative in [False, True]:
for use_wildcard in [False, True]:
yield check_reshape_with_arg_input, device, batch_size, relative, use_wildcard
class ReinterpretPipelineWithDefaultShape(Pipeline):
def __init__(self, device, batch_size, num_threads=3, device_id=0, num_gpus=1):
super(ReinterpretPipelineWithDefaultShape, self).__init__(
batch_size, num_threads, device_id, seed=7865, exec_async=True, exec_pipelined=True
)
self.device = device
self.ext_src = ops.ExternalSource()
self.reinterpret = ops.Reinterpret(device=device, dtype=types.INT32)
def define_graph(self):
input = self.input = self.ext_src()
if self.device == "gpu":
input = input.gpu()
reinterpreted = self.reinterpret(input)
# `input+0` creates a (no-op) arithmetic expression node - this prevents the
# original `input` node from being marked as pipeline output
return [input, reinterpreted]
def iter_setup(self):
data = []
for i in range(self.batch_size):
shape = np.random.randint(4, 20, size=[2])
shape[1] &= -4 # align to 4
data.append(np.random.randint(0, 255, shape, dtype=np.uint8))
self.feed_input(self.input, data)
def _test_reinterpret_default_shape(device):
np.random.seed(31337)
batch_size = 4
pipe = ReinterpretPipelineWithDefaultShape(device, batch_size)
pipe.build()
pipe_outs = pipe.run()
in_batch = pipe_outs[0].as_cpu() if device == "gpu" else pipe_outs[0]
out_batch = pipe_outs[1].as_cpu() if device == "gpu" else pipe_outs[1]
for i in range(batch_size):
ref = in_batch.at(i).view(dtype=np.int32)
out = out_batch.at(i)
assert_array_equal(ref, out)
def test_reinterpret_default_shape():
for device in ["cpu", "gpu"]:
yield _test_reinterpret_default_shape, device
class ReinterpretPipelineWildcardDim(Pipeline):
def __init__(self, device, batch_size, num_threads=3, device_id=0, num_gpus=1):
super(ReinterpretPipelineWildcardDim, self).__init__(
batch_size, num_threads, device_id, seed=7865, exec_async=True, exec_pipelined=True
)
self.device = device
self.ext_src = ops.ExternalSource()
self.reinterpret = ops.Reinterpret(device=device, shape=(20, 2), dtype=types.INT32)
def define_graph(self):
input = self.input = self.ext_src()
if self.device == "gpu":
input = input.gpu()
reinterpreted = self.reinterpret(input)
# `input+0` creates a (no-op) arithmetic expression node - this prevents the
# original `input` node from being marked as pipeline output
return [input, reinterpreted]
def iter_setup(self):
data = [np.random.randint(0, 255, [10, 16], dtype=np.uint8) for i in range(self.batch_size)]
self.feed_input(self.input, data)
def _test_reinterpret_wildcard_shape(device):
np.random.seed(31337)
batch_size = 4
pipe = ReinterpretPipelineWildcardDim(device, batch_size)
pipe.build()
pipe_outs = pipe.run()
in_batch = pipe_outs[0].as_cpu() if device == "gpu" else pipe_outs[0]
out_batch = pipe_outs[1].as_cpu() if device == "gpu" else pipe_outs[1]
for i in range(batch_size):
ref = in_batch.at(i).view(dtype=np.int32).reshape([20, 2])
out = out_batch.at(i)
assert_array_equal(ref, out)
def test_reinterpret_wildcard_shape():
for device in ["cpu", "gpu"]:
yield _test_reinterpret_wildcard_shape, device
def get_data(shapes):
return [np.empty(shape, dtype=np.uint8) for shape in shapes]
@pipeline_def
def reshape_pipe(shapes, src_dims=None, rel_shape=None):
data = fn.external_source(lambda: get_data(shapes), batch=True, device="cpu")
return fn.reshape(data, src_dims=src_dims, rel_shape=rel_shape)
def _testimpl_reshape_src_dims_arg(src_dims, rel_shape, shapes, expected_out_shapes):
batch_size = len(shapes)
pipe = reshape_pipe(batch_size=batch_size, num_threads=1, device_id=0,
shapes=shapes, src_dims=src_dims, rel_shape=rel_shape)
pipe.build()
for _ in range(3):
outs = pipe.run()
for i in range(batch_size):
out_arr = np.array(outs[0][i])
assert out_arr.shape == expected_out_shapes[i]
def test_reshape_src_dims_arg():
# src_dims, rel_shape, shapes, expected_out_shapes
args = [
([0, 1], None, [[200, 300, 1], [300, 400, 1]], [(200, 300), (300, 400)]),
([1, 2, 0], None, [[10, 20, 30], [30, 20, 10], [2, 1, 3]],
[(20, 30, 10), (20, 10, 30), (1, 3, 2)]),
([1], None, [[1, 2, 1], [1, 3, 1]], [(2,), (3,)]),
([2, -1, 1, 0], None, [[10, 20, 30]], [(30, 1, 20, 10)]),
([-1, 2], None, [[1, 1, 30], [1, 1, 70]], [(1, 30), (1, 70)]),
([2, 0, 1], [0.5, 0.5, -1], [[200, 300, 100]], [(50, 100, 1200)]),
([], None, [[1]], [()]),
]
for src_dims, rel_shape, shapes, expected_out_shapes in args:
yield _testimpl_reshape_src_dims_arg, src_dims, rel_shape, shapes, expected_out_shapes
if rel_shape is not None:
shape_inp = fn.constant(fdata=rel_shape, dtype=types.FLOAT)
yield _testimpl_reshape_src_dims_arg, src_dims, shape_inp, shapes, expected_out_shapes
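# A minimal NumPy sketch (illustration only; `_reference_reshaped_shape` is a
# hypothetical helper inferred from the cases above, not DALI API): `src_dims`
# selects and permutes input dimensions, with -1 inserting a new unit dimension,
# while `rel_shape` scales them and a -1 entry acts as a wildcard inferred from
# the total volume.
def _reference_reshaped_shape(in_shape, src_dims, rel_shape=None):
    dims = [in_shape[d] if d >= 0 else 1 for d in src_dims]
    if rel_shape is None:
        return tuple(dims)
    out = [int(d * r) if r > 0 else -1 for d, r in zip(dims, rel_shape)]
    if -1 in out:
        known = int(np.prod([e for e in out if e != -1]))
        out[out.index(-1)] = int(np.prod(in_shape)) // known
    return tuple(out)

assert _reference_reshaped_shape([200, 300, 100], [2, 0, 1], [0.5, 0.5, -1]) == (50, 100, 1200)
assert _reference_reshaped_shape([10, 20, 30], [2, -1, 1, 0]) == (30, 1, 20, 10)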
@params(
([2, 0], None, [[20, 10, 20]],
r"The volume of the new shape should match the one of the original shape\. "
r"Requested a shape with \d* elements but the original shape has \d* elements\."),
([2, 0, 1], [1, -1], [[1, 2, 3]],
r"`src_dims` and `rel_shape` have different lengths: \d* vs \d*"),
([0, 1, 3], None, [1, 2, 3], ".*is out of bounds.*"),
)
def test_reshape_src_dims_throw_error(src_dims, rel_shape, shapes, err_regex):
pipe = reshape_pipe(batch_size=len(shapes), num_threads=1, device_id=0, shapes=shapes,
src_dims=src_dims, rel_shape=rel_shape)
pipe.build()
with assert_raises(RuntimeError, regex=err_regex):
pipe.run()
@params([1, 1, -1], np.float32([1, 1, -1]))
def test_trailing_wildcard(rel_shape):
shapes = [[480, 640], [320, 240]]
pipe = reshape_pipe(batch_size=len(shapes), num_threads=1, device_id=0, shapes=shapes,
rel_shape=rel_shape)
pipe.build()
out, = pipe.run()
assert out[0].shape() == [480, 640, 1]
assert out[1].shape() == [320, 240, 1]
@params([1, -1, 1], np.float32([1, -1, 1]))
def test_invalid_wildcard(rel_shape):
shapes = [[480, 640], [320, 240]]
pipe = reshape_pipe(batch_size=len(shapes), num_threads=1, device_id=0, shapes=shapes,
rel_shape=rel_shape)
pipe.build()
err_glob = "*`rel_shape` has more elements (3) than*dimensions in the input (2)*" \
"use `src_dims`*"
with assert_raises(RuntimeError, glob=err_glob):
pipe.run()
def test_wildcard_zero_volume():
shapes = [[480, 640], [320, 0]]
pipe = reshape_pipe(batch_size=len(shapes), num_threads=1, device_id=0, shapes=shapes,
rel_shape=[-1, 1])
pipe.build()
err_glob = "*Cannot infer*dimension 0 when the volume*is 0. Input shape:*320 x 0"
with assert_raises(RuntimeError, glob=err_glob):
pipe.run()
|
DALI-main
|
dali/test/python/operator_2/test_reshape.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nose_utils # noqa:F401
import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import nvidia.dali.math as math
np.random.seed(4321)
def check_random_mask_pixel(ndim=2, batch_size=3,
min_extent=20, max_extent=50):
pipe = dali.pipeline.Pipeline(batch_size=batch_size, num_threads=4, device_id=0, seed=1234)
with pipe:
# Input mask
in_shape_dims = [fn.cast(fn.random.uniform(range=(min_extent, max_extent + 1)),
dtype=types.INT32) for _ in range(ndim)]
in_shape = fn.stack(*in_shape_dims)
in_mask = fn.cast(fn.random.uniform(range=(0, 2), shape=in_shape), dtype=types.INT32)
# > 0
fg_pixel1 = fn.segmentation.random_mask_pixel(in_mask, foreground=1)
# >= 0.99
fg_pixel2 = fn.segmentation.random_mask_pixel(in_mask, foreground=1, threshold=0.99)
# == 2
fg_pixel3 = fn.segmentation.random_mask_pixel(in_mask, foreground=1, value=2)
rnd_pixel = fn.segmentation.random_mask_pixel(in_mask, foreground=0)
coin_flip = fn.random.coin_flip(probability=0.7)
fg_biased = fn.segmentation.random_mask_pixel(in_mask, foreground=coin_flip)
        # Demo purposes: take a random pixel and produce a valid anchor to feed to slice
        # (a standalone sketch of this clamping follows the tests below).
        # We want to force the center adjustment, thus the large crop shape
crop_shape = in_shape - 2
anchor = fn.cast(fg_pixel1, dtype=types.INT32) - crop_shape // 2
anchor = math.min(math.max(0, anchor), in_shape - crop_shape)
out_mask = fn.slice(in_mask, anchor, crop_shape, axes=tuple(range(ndim)))
pipe.set_outputs(in_mask, fg_pixel1, fg_pixel2, fg_pixel3, rnd_pixel, coin_flip, fg_biased,
anchor, crop_shape, out_mask)
pipe.build()
for iter in range(3):
outputs = pipe.run()
for idx in range(batch_size):
in_mask = outputs[0].at(idx)
fg_pixel1 = outputs[1].at(idx).tolist()
fg_pixel2 = outputs[2].at(idx).tolist()
fg_pixel3 = outputs[3].at(idx).tolist()
rnd_pixel = outputs[4].at(idx).tolist()
coin_flip = outputs[5].at(idx).tolist()
fg_biased = outputs[6].at(idx).tolist()
anchor = outputs[7].at(idx).tolist()
crop_shape = outputs[8].at(idx).tolist()
out_mask = outputs[9].at(idx)
assert in_mask[tuple(fg_pixel1)] > 0
assert in_mask[tuple(fg_pixel2)] > 0.99
assert in_mask[tuple(fg_pixel3)] == 2
assert in_mask[tuple(fg_biased)] > 0 or not coin_flip
for d in range(ndim):
assert 0 <= anchor[d] and anchor[d] + crop_shape[d] <= in_mask.shape[d]
assert out_mask.shape == tuple(crop_shape)
def test_random_mask_pixel():
for ndim in (2, 3):
yield check_random_mask_pixel, ndim
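# Standalone sketch of the anchor clamping used in check_random_mask_pixel
# (hypothetical helper, not used by the tests): center the crop window on the
# chosen pixel, then clamp it so the whole window stays inside the mask.
def _clamped_anchor(pixel, crop_shape, in_shape):
    anchor = np.asarray(pixel) - np.asarray(crop_shape) // 2
    return np.clip(anchor, 0, np.asarray(in_shape) - np.asarray(crop_shape))

assert tuple(_clamped_anchor([0, 0], [18, 18], [20, 20])) == (0, 0)
assert tuple(_clamped_anchor([19, 19], [18, 18], [20, 20])) == (2, 2)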
|
DALI-main
|
dali/test/python/operator_2/test_segmentation_random_mask_pixel.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nose_utils # noqa: F401
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import numpy as np
import math
import os
import cv2
from sequences_test_utils import video_suite_helper, SampleDesc, ArgCb
from test_utils import compare_pipelines
import random
test_data_root = os.environ['DALI_EXTRA_PATH']
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
def gen_transform(angle, zoom, dst_cx, dst_cy, src_cx, src_cy):
t1 = np.array([[1, 0, -dst_cx], [0, 1, -dst_cy], [0, 0, 1]])
cosa = math.cos(angle) / zoom
sina = math.sin(angle) / zoom
r = np.array([
[cosa, -sina, 0],
[sina, cosa, 0],
[0, 0, 1]])
t2 = np.array([[1, 0, src_cx], [0, 1, src_cy], [0, 0, 1]])
return (np.matmul(t2, np.matmul(r, t1)))[0:2, 0:3]
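# The matrix above is composed as T2 * R * T1: translate the destination center
# to the origin, rotate and scale (zoom divides the rotation coefficients),
# then translate to the source center, i.e. a rotation and zoom about the
# given centers.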
def gen_transforms(n, step):
a = 0.0
step = step * (math.pi / 180)
out = np.zeros([n, 2, 3])
for i in range(n):
out[i, :, :] = gen_transform(a, 2, 160, 120, 100, 100)
a = a + step
return out.astype(np.float32)
def ToCVMatrix(matrix):
offset = np.matmul(matrix, np.array([[0.5], [0.5], [1]]))
result = matrix.copy()
result[0][2] = offset[0] - 0.5
result[1][2] = offset[1] - 0.5
return result
def CVWarp(output_type, input_type, warp_matrix=None, inv_map=False):
def warp_fn(img, matrix):
size = (320, 240)
matrix = ToCVMatrix(matrix)
if output_type == types.FLOAT or input_type == types.FLOAT:
img = np.float32(img)
fill = 12.5 if output_type == types.FLOAT else 42
out = cv2.warpAffine(img,
matrix,
size,
borderMode=cv2.BORDER_CONSTANT,
borderValue=[fill, fill, fill],
flags=((cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP)
if inv_map else cv2.INTER_LINEAR))
if output_type == types.UINT8 and input_type == types.FLOAT:
out = np.uint8(np.clip(out, 0, 255))
return out
if warp_matrix:
m = np.array(warp_matrix)
def warp_fixed(img):
return warp_fn(img, m)
return warp_fixed
return warp_fn
class WarpPipeline(Pipeline):
def __init__(self, device, batch_size, output_type, input_type, use_input, num_threads=3,
device_id=0, num_gpus=1, inv_map=False):
super(WarpPipeline, self).__init__(batch_size, num_threads, device_id, seed=7865,
exec_async=False, exec_pipelined=False)
self.use_input = use_input
self.use_dynamic_size = use_input # avoid Cartesian product
self.name = device
self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
num_shards=num_gpus)
self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
if input_type != types.UINT8:
self.cast = ops.Cast(device=device, dtype=input_type)
else:
self.cast = None
static_size = None if self.use_dynamic_size else (240, 320)
fill = 12.5 if output_type == types.FLOAT else 42
output_type_arg = output_type if output_type != input_type else None
if use_input:
self.transform_source = ops.ExternalSource(
lambda: gen_transforms(self.max_batch_size, 10))
self.warp = ops.WarpAffine(device=device,
size=static_size,
fill_value=fill,
dtype=output_type_arg,
inverse_map=inv_map)
else:
warp_matrix = (0.1, 0.9, 10, 0.8, -0.2, -20)
self.warp = ops.WarpAffine(device=device,
size=static_size,
matrix=warp_matrix,
fill_value=fill,
dtype=output_type_arg,
inverse_map=inv_map)
self.iter = 0
def define_graph(self):
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
if self.warp.device == "gpu":
images = images.gpu()
if self.cast:
images = self.cast(images)
dynamic_size = types.Constant(np.array([240, 320],
dtype=np.float32)) if self.use_dynamic_size else None
if self.use_input:
transform = self.transform_source()
outputs = self.warp(images, transform, size=dynamic_size)
else:
outputs = self.warp(images, size=dynamic_size)
return outputs
class CVPipeline(Pipeline):
def __init__(self, batch_size, output_type, input_type, use_input, num_threads=3, device_id=0,
num_gpus=1, inv_map=False):
super(CVPipeline, self).__init__(batch_size, num_threads, device_id, seed=7865,
exec_async=False, exec_pipelined=False)
self.use_input = use_input
self.name = "cv"
self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
num_shards=num_gpus)
self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
if self.use_input:
self.transform_source = ops.ExternalSource(
lambda: gen_transforms(self.max_batch_size, 10))
self.warp = ops.PythonFunction(
function=CVWarp(output_type, input_type, inv_map=inv_map),
output_layouts="HWC")
else:
self.warp = ops.PythonFunction(
function=CVWarp(output_type, input_type, [[0.1, 0.9, 10], [0.8, -0.2, -20]],
inv_map),
output_layouts="HWC")
self.iter = 0
def define_graph(self):
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
if self.use_input:
self.transform = self.transform_source()
outputs = self.warp(images, self.transform)
else:
outputs = self.warp(images)
return outputs
def compare(pipe1, pipe2, max_err):
epoch_size = pipe1.epoch_size("Reader")
batch_size = pipe1.max_batch_size
niter = (epoch_size + batch_size - 1) // batch_size
compare_pipelines(pipe1, pipe2, batch_size, niter, max_allowed_error=max_err)
io_types = [
(types.UINT8, types.UINT8),
(types.UINT8, types.FLOAT),
(types.FLOAT, types.UINT8),
(types.FLOAT, types.FLOAT)
]
def test_vs_cv():
def impl(device, batch_size, use_input, otype, itype, inv_map):
cv_pipeline = CVPipeline(batch_size, otype, itype, use_input, inv_map=inv_map)
cv_pipeline.build()
cpu_pipeline = WarpPipeline(device, batch_size, otype, itype, use_input, inv_map=inv_map)
cpu_pipeline.build()
compare(cv_pipeline, cpu_pipeline, 8)
random.seed(1009)
for device in ['cpu', 'gpu']:
for use_input in [False, True]:
for (itype, otype) in io_types:
inv_map = random.choice([False, True])
batch_size = random.choice([1, 4, 19])
yield impl, device, batch_size, use_input, otype, itype, inv_map
def test_gpu_vs_cpu():
def impl(batch_size, use_input, otype, itype, inv_map):
cpu_pipeline = WarpPipeline("cpu", batch_size, otype, itype, use_input, inv_map=inv_map)
cpu_pipeline.build()
gpu_pipeline = WarpPipeline("gpu", batch_size, otype, itype, use_input, inv_map=inv_map)
        gpu_pipeline.build()
        # Compare the CPU and GPU outputs; a tolerance of 1 absorbs rounding
        # differences between the two backends.
        compare(cpu_pipeline, gpu_pipeline, 1)
random.seed(1006)
for use_input in [False, True]:
for (itype, otype) in io_types:
inv_map = random.choice([False, True])
batch_size = random.choice([1, 4, 19])
yield impl, batch_size, use_input, otype, itype, inv_map
def _test_extremely_large_data(device):
in_size = 30000
out_size = 10
channels = 3
def get_data():
out = np.full([in_size, in_size, channels], 42, dtype=np.uint8)
for c in range(channels):
out[in_size - 1, in_size - 1, c] = c
return [out]
pipe = Pipeline(1, 3, 0, prefetch_queue_depth=1)
input = fn.external_source(source=get_data, device=device)
rotated = fn.warp_affine(input, matrix=[-1, 0, in_size, 0, -1, in_size], fill_value=255.0,
size=[out_size, out_size], interp_type=types.INTERP_NN)
pipe.set_outputs(rotated)
pipe.build()
out = None
try:
out, = pipe.run()
except RuntimeError as e:
if "bad_alloc" in str(e):
print("Skipping test due to out-of-memory error:", e)
return
raise
except MemoryError as e:
print("Skipping test due to out-of-memory error:", e)
return
if device == "cpu":
out = out.at(0)
else:
out = out.as_cpu().at(0)
assert out.shape == (out_size, out_size, channels)
for c in range(channels):
assert out[0, 0, c] == c
def test_extremely_large_data():
for device in ["cpu", "gpu"]:
yield _test_extremely_large_data, device
def test_video():
rng = random.Random(42)
def random_flip_mx(sample_desc):
x, y = sample_desc.rng.choice([(-1, -1), (1, -1), (-1, 1)])
_, h, w, _ = sample_desc.sample.shape # assuming FHWC layout
return np.array([
[x, 0, 0 if x == 1 else w],
[0, y, 0 if y == 1 else h],
[0, 0, 1]], dtype=np.float32)
def random_translate_mx(sample_desc):
_, h, w, _ = sample_desc.sample.shape # assuming FHWC layout
return np.array([
[1, 0, sample_desc.rng.uniform(-w / 2, w / 2)],
            [0, 1, sample_desc.rng.uniform(-h / 2, h / 2)],
[0, 0, 1]], dtype=np.float32)
def random_scale_mx(sample_desc):
def rand_scale():
return sample_desc.rng.uniform(0.25, 4)
return np.array([
[rand_scale(), 0, 0],
[0, rand_scale(), 0],
[0, 0, 1]], dtype=np.float32)
def random_rotate_mx(sample_desc):
angle = math.radians(sample_desc.rng.uniform(-90, 90))
c = np.cos(angle)
s = np.sin(angle)
return np.array([
[c, -s, 0],
[s, c, 0],
[0, 0, 1]], dtype=np.float32)
def random_mx(sample_desc):
m = np.eye(3, dtype=np.float32)
for transformation in [
random_flip_mx, random_translate_mx, random_scale_mx, random_rotate_mx
]:
if sample_desc.rng.choice([0, 1]):
m = np.matmul(m, transformation(sample_desc))
return m[0:2, :]
def output_size(sample_desc):
_, h, w, _ = sample_desc.sample.shape # assuming FHWC layout
rng = sample_desc.rng
return np.array([h * rng.uniform(0.5, 2), w * rng.uniform(0.5, 2)], dtype=np.float32)
video_test_cases = [
(fn.warp_affine, {
"matrix": random_rotate_mx(SampleDesc(rng, 0, 0, 0, None))[0:2, :]
}, []),
(fn.warp_affine, {}, [ArgCb("matrix", random_mx, False)]),
(fn.warp_affine, {}, [ArgCb("matrix", random_mx, True)]),
(fn.warp_affine, {}, [ArgCb("matrix", random_mx, False),
ArgCb("size", output_size, False)]),
(fn.warp_affine, {}, [ArgCb("matrix", random_mx, True),
ArgCb("size", output_size, False)]),
(fn.warp_affine, {}, [ArgCb(1, random_mx, True, dest_device="cpu")]),
(fn.warp_affine, {}, [ArgCb(1, random_mx, True, dest_device="gpu")], ["gpu"]),
(fn.warp_affine, {}, [ArgCb(1, random_mx, False, dest_device="cpu")]),
(fn.warp_affine, {}, [ArgCb(1, random_mx, False, dest_device="gpu")], ["gpu"]),
]
yield from video_suite_helper(video_test_cases, test_channel_first=False, expand_channels=False,
rng=rng)
|
DALI-main
|
dali/test/python/operator_2/test_warp.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import os.path
import unittest
import time
from nose2.tools import params
from nvidia.dali.pipeline.experimental import pipeline_def
from nvidia.dali.types import DALIInterpType
test_data_root = os.environ['DALI_EXTRA_PATH']
data_dir = os.path.join(test_data_root, 'db', 'single', 'jpeg')
rng = np.random.default_rng()
def update_map(mode, shape, nimages=1):
"""
Code for map calculation.
Based on https://github.com/opencv/opencv/blob/3.4/samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py # noqa
:param mode: One of: 'identity', 'xflip', 'yflip', 'xyflip', 'random'
    :param shape: (H, W) shape of a sample.
    :param nimages: Number of maps to be generated for every axis.
    :return: tuple of 2 ndarrays (mapx: [nimages, H, W], mapy: [nimages, H, W])
"""
mapsx = []
mapsy = []
for _ in range(nimages):
map_x = np.tile(np.arange(shape[1]), [shape[0], 1])
map_y = np.tile(np.arange(shape[0])[:, np.newaxis], [1, shape[1]])
if mode == 'identity':
pass
elif mode == 'xflip':
map_x = shape[1] - map_x
elif mode == 'yflip':
map_y = shape[0] - map_y
elif mode == 'xyflip':
map_x = shape[1] - map_x
map_y = shape[0] - map_y
elif mode == 'random':
            map_x = rng.uniform(low=0, high=shape[1], size=shape)
            map_y = rng.uniform(low=0, high=shape[0], size=shape)
else:
raise ValueError("Unknown map mode.")
mapsx.append(map_x)
mapsy.append(map_y)
return np.array(mapsx, dtype=np.float32), np.array(mapsy, dtype=np.float32)
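# Quick illustration of the 'identity' mode described in the docstring above
# (sanity sketch only; the tests below exercise the real pipelines): the maps
# are plain coordinate ramps, so remapping with them is a no-op.
_mx, _my = update_map('identity', (2, 3))
assert _mx.shape == _my.shape == (1, 2, 3)
assert (_mx[0] == np.tile(np.arange(3), [2, 1])).all()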
def _cv_remap(img, mapx, mapy):
return cv2.remap(img, mapx, mapy, cv2.INTER_NEAREST, cv2.BORDER_CONSTANT, 0)
@pipeline_def
def remap_pipe(remap_op, maps_data, img_size):
"""
Returns either a reference pipeline or a pipeline under test.
If the remap_op argument is 'dali', this function returns a DALI pipeline under test.
If the remap_op argument is 'cv', this function returns a reference DALI pipeline.
:param remap_op: 'dali' or 'cv'.
:param maps_data: List of ndarrays, which contains data for the remap parameters (maps).
:param img_size: Shape of the remap parameters, but without the channels value (only spatial).
:return: DALI Pipeline
"""
img, _ = fn.readers.file(file_root=data_dir)
img = fn.decoders.image(img)
img = fn.resize(img, size=img_size)
mapx, mapy = fn.external_source(source=maps_data, batch=True, cycle=True, num_outputs=2)
if remap_op == 'dali':
return fn.experimental.remap(img.gpu(), mapx.gpu(), mapy.gpu(),
interp=DALIInterpType.INTERP_NN, device='gpu',
pixel_origin="center")
elif remap_op == 'cv':
return fn.python_function(img, mapx, mapy, function=_cv_remap)
else:
raise ValueError("Unknown remap operator.")
class RemapTest(unittest.TestCase):
def setUp(self):
self.img_size = (480, 640)
self.batch_size = 64
self.common_dali_pipe_params = {
"batch_size": self.batch_size,
"num_threads": 1,
"device_id": 0,
}
@params('identity', 'xflip', 'yflip', 'xyflip', 'random')
def test_remap(self, map_mode):
maps = [update_map(mode=map_mode, shape=self.img_size, nimages=self.batch_size)]
dpipe = remap_pipe('dali', maps, self.img_size, **self.common_dali_pipe_params)
cpipe = remap_pipe('cv', maps, self.img_size, exec_async=False, exec_pipelined=False,
**self.common_dali_pipe_params)
self._compare_pipelines_pixelwise(dpipe, cpipe, N_iterations=2, eps=.01)
def benchmark_remap_against_cv(self, map_mode):
import torch.cuda.nvtx as nvtx
nvtx.range_push("Benchmark against OpenCV")
maps = [update_map(mode=map_mode, shape=self.img_size, nimages=self.batch_size)]
dpipe = remap_pipe('dali', maps, self.img_size, exec_async=False, exec_pipelined=False,
**self.common_dali_pipe_params, prefetch_queue_depth=1)
cpipe = remap_pipe('cv', maps, self.img_size, exec_async=False, exec_pipelined=False,
**self.common_dali_pipe_params, prefetch_queue_depth=1)
dpipe.build()
cpipe.build()
dtime = self._measure_time(dpipe.run)
ctime = self._measure_time(cpipe.run)
nvtx.range_pop()
print(f"DALI Pipeline average time: {dtime}. OpenCV Pipeline average time: {ctime}.")
def benchmark_remap_isolated(self, map_mode):
import torch.cuda.nvtx as nvtx
nvtx.range_push("Benchmark isolated")
maps = [update_map(mode=map_mode, shape=self.img_size, nimages=self.batch_size)]
dpipe = remap_pipe('dali', maps, self.img_size, **self.common_dali_pipe_params,
prefetch_queue_depth=1)
dpipe.build()
avg_time = self._measure_time(dpipe.run)
nvtx.range_pop()
print(f"DALI Pipeline average execution time: {avg_time} seconds.")
def _compare_pipelines_pixelwise(self, pipe1, pipe2, N_iterations, eps=.01):
pipe1.build()
pipe2.build()
for _ in range(N_iterations):
out1 = pipe1.run()
out2 = pipe2.run()
self.assertTrue(
len(out1) == len(out2),
f"Numbers of outputs in the pipelines does not match: {len(out1)} vs {len(out2)}.")
for i in range(len(out1)):
out1_data = out1[i].as_cpu() \
if isinstance(out1[i][0], dali.backend_impl.TensorGPU) else out1[i]
out2_data = out2[i].as_cpu() \
if isinstance(out2[i][0], dali.backend_impl.TensorGPU) else out2[i]
for sample1, sample2 in zip(out1_data, out2_data):
s1 = np.array(sample1)
s2 = np.array(sample2)
self.assertTrue(s1.shape == s2.shape,
f"Sample shapes do not match: {s1.shape} vs {s2.shape}")
noutliers = self._count_outlying_pixels(s1, s2)
size = np.prod(s1.shape[:-1])
self.assertTrue(
noutliers / size < eps,
f"Test failed. Actual error: {noutliers / size}, expected: {eps}.")
@staticmethod
def _measure_time(func, n_iterations=30):
times = []
for _ in range(n_iterations):
start = time.perf_counter()
func()
stop = time.perf_counter()
times.append(stop - start)
return np.mean(np.array(times))
@staticmethod
def _count_outlying_pixels(sample1, sample2):
eq = sample1 != sample2
return np.count_nonzero(np.sum(eq, axis=2))
|
DALI-main
|
dali/test/python/operator_2/test_remap.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import numpy as np
from functools import partial
from test_utils import compare_pipelines
from test_utils import RandomlyShapedDataIterator
SEED = 12345
def preemph_func(border, coeff, signal):
in_shape = signal.shape
assert len(in_shape) == 1 # 1D
out = np.copy(signal)
# nothing to do for border == 'zero'
if border == 'clamp':
out[0] -= coeff * signal[0]
elif border == 'reflect':
out[0] -= coeff * signal[1]
out[1:] -= coeff * signal[0:in_shape[0] - 1]
return out
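# The reference above computes out[n] = in[n] - coeff * in[n-1]; the border
# mode only selects the value used for in[-1] (0, in[0] or in[1]).
# Sanity sketch:
assert np.allclose(preemph_func('clamp', 0.5, np.float32([1, 2, 3])), [0.5, 1.5, 2.0])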
class PreemphasisPipeline(Pipeline):
def __init__(self, device, batch_size, iterator, border='clamp', preemph_coeff=0.97,
per_sample_coeff=False, num_threads=4, device_id=0):
super(PreemphasisPipeline, self).__init__(batch_size, num_threads, device_id, seed=SEED)
self.device = device
self.iterator = iterator
self.per_sample_coeff = per_sample_coeff
self.uniform = ops.random.Uniform(range=(0.5, 0.97), seed=1234)
if self.per_sample_coeff:
self.preemph = ops.PreemphasisFilter(device=device, border=border)
else:
self.preemph = ops.PreemphasisFilter(device=device, border=border,
preemph_coeff=preemph_coeff)
def define_graph(self):
data = fn.external_source(lambda: next(self.iterator))
out = data.gpu() if self.device == 'gpu' else data
if self.per_sample_coeff:
preemph_coeff_arg = self.uniform()
return self.preemph(out, preemph_coeff=preemph_coeff_arg)
else:
return self.preemph(out)
class PreemphasisPythonPipeline(Pipeline):
def __init__(self, device, batch_size, iterator, border='clamp', preemph_coeff=0.97,
per_sample_coeff=False, num_threads=4, device_id=0):
super(PreemphasisPythonPipeline,
self).__init__(batch_size, num_threads, device_id, seed=SEED, exec_async=False,
exec_pipelined=False)
self.device = "cpu"
self.iterator = iterator
self.per_sample_coeff = per_sample_coeff
self.uniform = ops.random.Uniform(range=(0.5, 0.97), seed=1234)
if self.per_sample_coeff:
function = partial(preemph_func, border)
else:
function = partial(preemph_func, border, preemph_coeff)
self.preemph = ops.PythonFunction(function=function)
def define_graph(self):
data = fn.external_source(lambda: next(self.iterator))
if self.per_sample_coeff:
coef = self.uniform()
return self.preemph(coef, data)
else:
return self.preemph(data)
def check_preemphasis_operator(device, batch_size, border, preemph_coeff, per_sample_coeff):
eii1 = RandomlyShapedDataIterator(batch_size, min_shape=(100, ), max_shape=(10000, ),
dtype=np.float32)
eii2 = RandomlyShapedDataIterator(batch_size, min_shape=(100, ), max_shape=(10000, ),
dtype=np.float32)
compare_pipelines(
PreemphasisPipeline(device, batch_size, iter(eii1), border=border,
preemph_coeff=preemph_coeff, per_sample_coeff=per_sample_coeff),
PreemphasisPythonPipeline(device, batch_size, iter(eii2), border=border,
preemph_coeff=preemph_coeff, per_sample_coeff=per_sample_coeff),
batch_size=batch_size, N_iterations=3)
def test_preemphasis_operator():
for device in ['cpu', 'gpu']:
for batch_size in [1, 3, 128]:
for border in ['zero', 'clamp', 'reflect']:
for coef, per_sample_coeff in [(0.97, False), (0.0, False), (None, True)]:
yield (check_preemphasis_operator, device, batch_size, border, coef,
per_sample_coeff)
|
DALI-main
|
dali/test/python/operator_2/test_preemph.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
import numpy as np
from functools import partial
from test_utils import get_files
from test_utils import compare_pipelines
from test_utils import RandomDataIterator
from test_utils import ConstantDataIterator
import librosa as librosa
import math
audio_files = get_files('db/audio/wav', 'wav')
class SpectrogramPipeline(Pipeline):
def __init__(self, device, batch_size, iterator, nfft, window_length, window_step,
window=None, center=None, num_threads=1, device_id=0):
super(SpectrogramPipeline, self).__init__(batch_size, num_threads, device_id)
self.device = device
self.iterator = iterator
self.inputs = ops.ExternalSource()
window_fn = window(window_length).tolist() if window is not None else None
self.fft = ops.Spectrogram(device=self.device,
nfft=nfft,
window_length=window_length,
window_step=window_step,
window_fn=window_fn,
center_windows=center,
power=2)
        # randomly decide whether iter_setup inserts an extra (channel-like) axis
        self.r = np.random.randint(-1, 2)
def define_graph(self):
self.data = self.inputs()
out = self.data.gpu() if self.device == 'gpu' else self.data
out = self.fft(out)
return out
def iter_setup(self):
data = self.iterator.next()
if self.r == 0:
data = [x[np.newaxis, :] for x in data]
elif self.r == 1:
data = [x[:, np.newaxis] for x in data]
self.feed_input(self.data, data)
def hann_win(n):
hann = np.ones([n], dtype=np.float32)
a = (2.0 * math.pi / n)
for t in range(n):
phase = a * (t + 0.5)
hann[t] = 0.5 * (1.0 - math.cos(phase))
return hann
def cos_win(n):
phase = (np.arange(n) + 0.5) * (math.pi / n)
return np.sin(phase).astype(np.float32)
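# Both windows above are sampled at half-integer phases (t + 0.5). Note that
# hann_win equals cos_win squared, since 0.5 * (1 - cos(2x)) == sin(x) ** 2.
assert np.allclose(hann_win(16), cos_win(16) ** 2)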
def spectrogram_func_librosa(nfft, win_len, win_step, window, center, input_data):
# Squeeze to 1d
if len(input_data.shape) > 1:
input_data = np.squeeze(input_data)
if window is None:
window = hann_win
out = np.abs(librosa.stft(y=input_data, n_fft=nfft or win_len, center=center,
win_length=win_len, hop_length=win_step, window=window))**2
# Alternative way to calculate the spectrogram:
# out, _ = librosa.core.spectrum._spectrogram(
# y=input_data, n_fft=nfft, hop_length=win_step, window=hann_win, power=2)
return out
class SpectrogramPythonPipeline(Pipeline):
def __init__(self, device, batch_size, iterator, nfft, window_length,
window_step, window=None, center=None, num_threads=1,
device_id=0, spectrogram_func=spectrogram_func_librosa):
super(SpectrogramPythonPipeline, self).__init__(
batch_size, num_threads, device_id,
seed=12345, exec_async=False, exec_pipelined=False)
self.device = "cpu"
self.iterator = iterator
self.inputs = ops.ExternalSource()
function = partial(spectrogram_func, nfft, window_length, window_step, window, center)
self.spectrogram = ops.PythonFunction(function=function, output_layouts=["ft"])
def define_graph(self):
self.data = self.inputs()
out = self.spectrogram(self.data)
return out
def iter_setup(self):
data = self.iterator.next()
self.feed_input(self.data, data)
def check_operator_spectrogram_vs_python(device, batch_size, input_shape,
nfft, window_length, window_step, center):
eii1 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
eii2 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
compare_pipelines(
SpectrogramPipeline(device, batch_size, iter(eii1), nfft=nfft, window=None,
window_length=window_length, window_step=window_step, center=center),
SpectrogramPythonPipeline(device, batch_size, iter(eii2), window=None,
nfft=nfft, window_length=window_length,
window_step=window_step, center=center),
batch_size=batch_size, N_iterations=3, eps=1e-04)
def test_operator_spectrogram_vs_python():
for device in ['cpu', 'gpu']:
for batch_size in [3]:
for center in [False, True]:
for nfft, window_length, window_step, shape in [(256, 256, 128, (1, 4096)),
(256, 256, 128, (4096,)),
(256, 256, 128, (4096, 1)),
(256, 256, 128, (1, 1, 4096, 1)),
(16, 16, 8, (1, 1000)),
(10, 10, 5, (1, 1000)),
(None, 10, 5, (1, 1000))]:
yield check_operator_spectrogram_vs_python, device, batch_size, shape, \
nfft, window_length, window_step, center
def check_operator_spectrogram_vs_python_wave_1d(device, batch_size, input_length,
nfft, window_length, window_step, window, center):
f = 4000 # [Hz]
sr = 44100 # [Hz]
x = np.arange(input_length, dtype=np.float32)
y = np.sin(2 * np.pi * f * x / sr)
data1 = ConstantDataIterator(batch_size, y, dtype=np.float32)
data2 = ConstantDataIterator(batch_size, y, dtype=np.float32)
compare_pipelines(
SpectrogramPipeline(device, batch_size, iter(data1), nfft=nfft,
window_length=window_length, window_step=window_step,
window=window, center=center),
SpectrogramPythonPipeline(device, batch_size, iter(data2),
nfft=nfft, window_length=window_length, window_step=window_step,
window=window, center=center),
batch_size=batch_size, N_iterations=3, eps=1e-04)
def test_operator_spectrogram_vs_python_wave():
for device in ['cpu', 'gpu']:
for window in [None, hann_win, cos_win]:
for batch_size in [3]:
for nfft, window_length, window_step, length in [(256, 256, 128, 4096),
(128, 100, 61, 1000),
(10, 10, 5, 1000)]:
# Note: center_windows=False and nfft > window_length doesn't work like librosa.
# Librosa seems to disregard window_length
# and extract windows of nfft size regardless
for center in [False, True] if nfft == window_length else [True]:
yield check_operator_spectrogram_vs_python_wave_1d, device, batch_size, \
length, nfft, window_length, window_step, window, center
class AudioSpectrogramPipeline(Pipeline):
def __init__(self, device, batch_size, nfft, window_length, window_step, center, layout="ft",
num_threads=1, device_id=0):
super(AudioSpectrogramPipeline, self).__init__(batch_size, num_threads, device_id)
self.input = ops.readers.File(device="cpu", files=audio_files)
self.decode = ops.decoders.Audio(device="cpu", dtype=types.FLOAT, downmix=True)
self.fft = ops.Spectrogram(device=device,
nfft=nfft,
window_length=window_length,
window_step=window_step,
power=2,
center_windows=center,
layout=layout)
def define_graph(self):
read, _ = self.input()
audio, rate = self.decode(read)
if self.fft.device == "gpu":
audio = audio.gpu()
spec = self.fft(audio)
return spec
class AudioSpectrogramPythonPipeline(Pipeline):
def __init__(self, batch_size, nfft, window_length, window_step, center, layout="ft",
num_threads=1, device_id=0, spectrogram_func=spectrogram_func_librosa):
super(AudioSpectrogramPythonPipeline, self).__init__(
batch_size, num_threads, device_id,
seed=12345, exec_async=False, exec_pipelined=False)
self.input = ops.readers.File(device="cpu", files=audio_files)
self.decode = ops.decoders.Audio(device="cpu", dtype=types.FLOAT, downmix=True)
function = partial(spectrogram_func, nfft, window_length, window_step, None, center)
self.spectrogram = ops.PythonFunction(function=function, output_layouts=["ft"])
self.layout = layout
def define_graph(self):
read, _ = self.input()
audio, rate = self.decode(read)
out = self.spectrogram(audio)
if self.layout == "tf":
out = dali.fn.transpose(out, perm=[1, 0], transpose_layout=True)
return out
def check_operator_decoder_and_spectrogram_vs_python(device, batch_size, nfft, window_length,
window_step, center, layout):
compare_pipelines(
AudioSpectrogramPipeline(device=device, batch_size=batch_size,
nfft=nfft, window_length=window_length, window_step=window_step,
center=center, layout=layout),
AudioSpectrogramPythonPipeline(batch_size, nfft=nfft,
window_length=window_length, window_step=window_step,
center=center, layout=layout),
batch_size=batch_size, N_iterations=3, eps=1e-04)
def test_operator_decoder_and_spectrogram():
for device in ["cpu", "gpu"]:
for layout in ["tf", "ft"]:
for batch_size in [3]:
                for nfft, window_length, window_step in [(256, 256, 128),
                                                         (16, 16, 8),
                                                         (10, 10, 5)]:
# Note: center_windows=False and nfft > window_length doesn't work like librosa.
                    # Librosa seems to disregard window_length
# and extract windows of nfft size regardless
for center in [False, True] if nfft == window_length else [True]:
yield check_operator_decoder_and_spectrogram_vs_python, device, \
batch_size, nfft, window_length, window_step, center, layout
|
DALI-main
|
dali/test/python/operator_2/test_spectrogram.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.fn as fn
import numpy as np
from nose_utils import raises
def get_sequence(shape, offset=0):
assert len(shape) > 1
elem_shape = shape.copy()
seq_length = elem_shape[0]
elem_shape[0] = 1
elems = []
for i in range(seq_length):
elems.append(np.full(elem_shape, offset + i))
return np.concatenate(elems, axis=0)
def get_sequences(batch_size, shape):
batch = []
for i in range(batch_size):
batch.append(get_sequence(shape, i * shape[0]))
return batch
def reorder_sample(sample, seq_len, order):
"""
Reorder sequence in one sample according to order parameter
"""
split = np.split(sample, seq_len)
reordered = []
for i in range(len(order)):
reordered.append(split[order[i]])
return np.concatenate(reordered, axis=0)
def reorder(input, seq_len, reorders, persample_reorder=True):
"""
Reorder the whole batch of sequences according to `reorders`
reorders is one list with new order or list of new_orders depending on `persample_reorder`
"""
result = []
for i, sample in enumerate(input):
order = reorders[i] if persample_reorder else reorders
result.append(reorder_sample(sample, seq_len, order))
return result
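# Sanity sketch of the reordering semantics (not part of the tests): a sequence
# of four one-element frames rearranged with order [3, 2, 1, 0] is reversed.
_sample = np.arange(4).reshape(4, 1)
assert (reorder_sample(_sample, 4, [3, 2, 1, 0]) == np.array([[3], [2], [1], [0]])).all()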
def to_batch(tl, batch_size):
return [np.array(tl[i]) for i in range(batch_size)]
def check_sequence_rearrange(batch_size, shape, reorders, persample_reorder=True, op_type="cpu",
layout=""):
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
with pipe:
input = fn.external_source(lambda: get_sequences(batch_size, shape), layout=layout)
frames = input.gpu() if op_type == "gpu" else input
order = fn.external_source(lambda: reorders) if persample_reorder else reorders
rearranged = fn.sequence_rearrange(frames, new_order=order, device=op_type)
pipe.set_outputs(rearranged, input)
pipe.build()
result, input = pipe.run()
if op_type == "gpu":
result = result.as_cpu()
input = to_batch(input, batch_size)
baseline = reorder(input, shape[0], reorders, persample_reorder)
for i in range(batch_size):
np.testing.assert_array_equal(result[i], baseline[i])
order_0 = ([3, 2, 1, 0], False)
order_1 = ([np.int32([3, 0]),
np.int32([2, 1]),
np.int32([1, 1]),
np.int32([0, 1, 2]),
np.int32([3])], True)
order_2 = ([np.int32([0]),
np.int32([1]),
np.int32([2]),
np.int32([3]),
np.int32([0, 1, 2, 3])], True)
def test_sequence_rearrange():
for dev in ["cpu", "gpu"]:
for shape in [[4, 3, 2], [5, 1]]:
for new_order, per_sample in [order_0, order_1, order_2]:
for layout in ["FHW"[:len(shape)], ""]:
yield check_sequence_rearrange, 5, shape, new_order, per_sample, dev, layout
def check_fail_sequence_rearrange(batch_size, shape, reorders, persample_reorder=True,
op_type="cpu", layout=""):
check_sequence_rearrange(batch_size, shape, reorders, persample_reorder, op_type, layout)
def test_fail_sequence_rearrange():
shape = [5, 1]
orders = [
([6, 7], False),
([-1], False),
([], False),
([np.int32([0]), np.int32([])], True),
([np.int32([6, 7]), np.int32([0])], True),
([np.int32([-1]), np.int32([0])], True),
([np.int32([[1], [2]]), np.int32([[1], [2]])], True)
]
error_msgs = [
'new_order[[]*[]] must be between * and input_sequence_length = * for sample *, but it is: *', # noqa:E501
'new_order[[]*[]] must be between * and input_sequence_length = * for sample *, but it is: *', # noqa:E501
'Empty result sequences are not allowed',
'Empty `new_order` for sample * is not allowed',
'new_order[[]*[]] must be between * and input_sequence_length = * for sample *, but it is: *', # noqa:E501
'new_order[[]*[]] must be between * and input_sequence_length = * for sample *, but it is: *', # noqa:E501
'Input with dimension * cannot be converted to dimension *'
]
assert len(orders) == len(error_msgs)
for dev in ["cpu", "gpu"]:
for [new_order, per_sample], error_msg in zip(orders, error_msgs):
yield raises(
RuntimeError,
glob=error_msg
)(check_fail_sequence_rearrange), 2, shape, new_order, per_sample, dev
def test_wrong_layouts_sequence_rearrange():
shape = [5, 1]
new_order = [0, 2, 1, 3, 4]
per_sample = False
for dev in ["cpu", "gpu"]:
for layout in ["HF", "HW"]:
yield raises(
RuntimeError,
glob=('Expected sequence as the input, where outermost dimension represents'
' frames dimension `F`, got data with layout = "H[WF]"')
)(check_fail_sequence_rearrange), 5, shape, new_order, per_sample, dev, layout
|
DALI-main
|
dali/test/python/operator_2/test_sequence_rearrange.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import test_utils
def close(a, b):
if isinstance(a, np.float32):
return np.isclose(a, b)
absdiff = a - b if b < a else b - a
return absdiff <= 1
def analyze_frame(image, channel_dim):
def pixel(x, y):
return image[:, y, x] if channel_dim == 0 else image[y, x, :]
x0, y0, f0 = pixel(0, 0)
x1, y1, f1 = pixel(-1, 0)
x2, y2, f2 = pixel(0, -1)
x3, y3, f3 = pixel(-1, -1)
assert close(x0, x2), "x0 = {} != x2 = {}".format(x0, x2)
assert close(x1, x3), "x1 = {} != x3 = {}".format(x1, x3)
assert close(y0, y1), "y0 = {} != y1 = {}".format(y0, y1)
assert close(y2, y3), "y2 = {} != y3 = {}".format(y2, y3)
assert close(f0, f1) and close(f0, f2) and close(f0, f3)
return x0, y0, x3, y3, int(np.round(f0))
def check_frame(image, frame_index, total_frames, channel_dim, roi, w, h, aspect_ratio_range,
area_range, value_range):
x0, y0, x1, y1, f = analyze_frame(image, channel_dim)
assert f == frame_index * value_range // total_frames
out_h, out_w = image.shape[:2] if channel_dim != 0 else image.shape[1:3]
xeps = np.ceil(2 + 2 * w / out_w)
yeps = np.ceil(2 + 2 * h / out_h)
if frame_index == 0:
roi_w_max = min((x1 - x0) * w / value_range + xeps, w)
roi_w_min = max((x1 - x0) * w / value_range - xeps, 1)
roi_h_max = min((y1 - y0) * h / value_range + yeps, h)
        roi_h_min = max((y1 - y0) * h / value_range - yeps, 1)
ratio_min = roi_w_min / roi_h_max
ratio_max = roi_w_max / roi_h_min
area_min = roi_w_min * roi_h_min / (w * h)
area_max = roi_w_max * roi_h_max / (w * h)
        assert ratio_max >= aspect_ratio_range[0] and ratio_min <= aspect_ratio_range[1], \
            "aspect ratio estimated at {}..{} outside valid range [{} .. {}]".format(
                ratio_min, ratio_max, *aspect_ratio_range)
        assert area_max >= area_range[0] and area_min <= area_range[1], \
            "area estimated at {}..{} outside valid range [{} .. {}]".format(
                area_min, area_max, *area_range)
return x0, y0, x1, y1
else:
assert (x0, y0, x1, y1) == roi
return roi
def check_seq(seq, channel_dim, w, h, aspect_ratio_range, area_range, value_range):
frame_dim = 1 if channel_dim == 0 else 0
frame_channel_dim = -1 if channel_dim == -1 else 0
roi = None
total_frames = seq.shape[frame_dim]
for f in range(total_frames):
frame = seq[:, f] if frame_dim == 1 else seq[f]
roi = check_frame(frame, f, total_frames, frame_channel_dim, roi, w, h, aspect_ratio_range,
area_range, value_range)
def check_output(output, channel_dim, input_shape, aspect_ratio_range, area_range, value_range):
if len(input_shape) == 3:
h, w = input_shape[1:3] if channel_dim == 0 else input_shape[0:2]
check_frame(output, 0, 1, channel_dim, None, w, h, aspect_ratio_range, area_range,
value_range)
else:
hidx = 1 if channel_dim == -1 else 2
h, w = input_shape[hidx:hidx + 2]
check_seq(output, channel_dim, w, h, aspect_ratio_range, area_range, value_range)
def type_range(type):
    if np.issubdtype(type, np.integer):
        return np.iinfo(type).max
    else:
        # arbitrary value range used to encode coordinates in floating-point data
        return 100000
def generate_data(frames, width, height, channel_dim, type):
value_range = type_range(type)
no_frames = (frames is None)
if no_frames:
frames = 1
x = (np.arange(0, width) * value_range // width).astype(type)[np.newaxis, np.newaxis, :]
y = (np.arange(0, height) * value_range // height).astype(type)[np.newaxis, :, np.newaxis]
f = (np.arange(0, frames) * value_range // frames).astype(type)[:, np.newaxis, np.newaxis]
x = np.broadcast_to(x, (frames, height, width))
y = np.broadcast_to(y, (frames, height, width))
f = np.broadcast_to(f, (frames, height, width))
seq = np.stack([x, y, f], axis=channel_dim)
if no_frames:
seq = seq[:, 0] if channel_dim == 0 else seq[0]
return seq
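# Note on the encoding above: each channel stores a coordinate ramp (x, y and
# frame index, scaled to the type's value range), so check_frame can later
# decode which input region a crop came from by reading the corner pixels.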
def generator(batch_size, max_frames, channel_dim, type):
type = test_utils.dali_type_to_np(type)
assert max_frames is not None or channel_dim != 1
def generate():
batch = []
for _ in range(batch_size):
frames = None if max_frames is None else np.random.randint(1, max_frames + 1)
            sz = np.random.randint(100, 2000 // (max_frames or 1))
w, h = np.random.randint(sz, 2 * sz, [2])
batch.append(generate_data(frames, w, h, channel_dim, type))
return batch
return generate
def _test_rrc(device, max_frames, layout, aspect_ratio_range, area_range, output_size, input_type,
output_type):
batch_size = 4
pipe = dali.pipeline.Pipeline(batch_size, 4, 0)
channel_dim = layout.find('C')
value_range = type_range(test_utils.dali_type_to_np(input_type))
if channel_dim == len(layout) - 1:
channel_dim = -1
input = fn.external_source(source=generator(batch_size, max_frames, channel_dim, input_type),
layout=layout)
shape = fn.shapes(input)
if device == "gpu":
input = input.gpu()
out = fn.random_resized_crop(input, random_aspect_ratio=aspect_ratio_range,
random_area=area_range, size=output_size,
interp_type=dali.types.INTERP_LINEAR,
antialias=False, seed=12321,
dtype=output_type)
pipe.set_outputs(out, shape)
pipe.build()
for iter in range(3):
outputs, input_shapes = pipe.run()
if device == "gpu":
outputs = outputs.as_cpu()
assert outputs.layout() == layout
for i in range(batch_size):
out = outputs.at(i)
input_shape = input_shapes.at(i).tolist()
check_output(out, channel_dim, input_shape, aspect_ratio_range, area_range, value_range)
def test_random_resized_crop():
np.random.seed(12345)
types = [dali.types.UINT8, dali.types.INT16, dali.types.FLOAT]
sizes = [(100, 100), (320, 240)]
for device in ["cpu", "gpu"]:
for layout, max_frames in [("FHWC", 8), ("FCHW", 1), ("CFHW", 1),
("HWC", None), ("CHW", None)]:
for aspect, area in [
((0.5, 2), (0.1, 0.8)),
((1, 2), (0.4, 1.0)),
((0.5, 1), (0.1, 0.5))
]:
input_type = types[np.random.randint(0, len(types))]
output_type = dali.types.FLOAT if np.random.randint(0, 2) else None
size = sizes[np.random.randint(0, len(sizes))]
yield _test_rrc, device, max_frames, layout, aspect, area, size, \
input_type, output_type
|
DALI-main
|
dali/test/python/operator_2/test_random_resized_crop.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nose_utils # noqa:F401 - for Python 3.10
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import nvidia.dali as dali
import numpy as np
import os
import cv2
from test_utils import compare_pipelines
from test_utils import get_dali_extra_path
test_data_root = get_dali_extra_path()
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
class WaterPipeline(Pipeline):
def __init__(self, device, batch_size, phase_y, phase_x, freq_x, freq_y, ampl_x, ampl_y,
num_threads=3, device_id=0, num_gpus=1, dtype=types.UINT8, prime_size=False,
do_mask=False):
super(WaterPipeline, self).__init__(batch_size, num_threads, device_id)
self.device = device
self.dtype = dtype
self.prime_size = prime_size
self.do_mask = do_mask
self.input = ops.readers.Caffe(path=caffe_db_folder,
shard_id=device_id,
num_shards=num_gpus)
self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
self.water = ops.Water(device=self.device,
ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y,
freq_x=freq_x, freq_y=freq_y,
interp_type=dali.types.INTERP_LINEAR)
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
if self.device == 'gpu':
images = images.gpu()
if self.prime_size:
images = fn.resize(images, resize_x=101, resize_y=43)
mask = fn.random.coin_flip(seed=42) if self.do_mask else None
images = fn.cast(images, dtype=self.dtype)
images = self.water(images, mask=mask)
return images
def python_water(img, phase_y, phase_x, freq_x, freq_y, ampl_x, ampl_y):
nh, nw = img.shape[:2]
img_x = np.zeros((nh, nw), np.float32)
img_y = np.zeros((nh, nw), np.float32)
x_idx = np.arange(0, nw, 1, np.float32)
y_idx = np.arange(0, nh, 1, np.float32)
x_wave = ampl_y * np.cos(freq_y * x_idx + phase_y)
y_wave = ampl_x * np.sin(freq_x * y_idx + phase_x)
for x in range(nw):
img_x[:, x] = y_wave + x - 0.5
for y in range(nh):
img_y[y, :] = x_wave + y - 0.5
return cv2.remap(img, img_x, img_y, cv2.INTER_LINEAR)
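# In the reference above, the source position sampled for output pixel (x, y) is
#   (x + ampl_x * sin(freq_x * y + phase_x) - 0.5,
#    y + ampl_y * cos(freq_y * x + phase_y) - 0.5),
# i.e. a sinusoidal displacement field; the -0.5 offset presumably aligns
# OpenCV's sampling grid with DALI's pixel-center convention.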
class WaterPythonPipeline(Pipeline):
def __init__(self, batch_size, function,
num_threads=1, device_id=0, num_gpus=1,
dtype=types.UINT8,
prime_size=False):
super().__init__(batch_size,
num_threads,
device_id,
exec_async=False,
exec_pipelined=False)
self.dtype = dtype
self.prime_size = prime_size
self.input = ops.readers.Caffe(path=caffe_db_folder,
shard_id=device_id,
num_shards=num_gpus)
self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
self.water = ops.PythonFunction(function=function, output_layouts="HWC")
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
if self.prime_size:
images = fn.resize(images, resize_x=101, resize_y=43)
images = fn.cast(images, dtype=self.dtype)
images = self.water(images)
return images
def check_water_cpu_vs_gpu(batch_size, niter, dtype, do_mask):
phase_y = 0.5
phase_x = 0.2
freq_x = 0.06
freq_y = 0.08
ampl_x = 2.0
ampl_y = 3.0
compare_pipelines(WaterPipeline('cpu', batch_size,
ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y,
freq_x=freq_x, freq_y=freq_y,
dtype=dtype,
do_mask=do_mask),
WaterPipeline('gpu', batch_size,
ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y,
freq_x=freq_x, freq_y=freq_y,
dtype=dtype,
do_mask=do_mask),
batch_size=batch_size,
N_iterations=niter,
eps=1)
def test_water_cpu_vs_gpu():
niter = 3
for batch_size in [1, 3]:
for do_mask in [False, True]:
for dtype in [types.UINT8, types.FLOAT]:
yield check_water_cpu_vs_gpu, batch_size, niter, dtype, do_mask
def check_water_vs_cv(device, batch_size, niter, dtype, prime_size):
phase_y = 0.5
phase_x = 0.2
freq_x = 0.06
freq_y = 0.08
ampl_x = 2.0
ampl_y = 3.0
def python_func(img):
return python_water(img, phase_y, phase_x, freq_x, freq_y, ampl_x, ampl_y)
compare_pipelines(WaterPipeline(device, batch_size,
ampl_x=ampl_x, ampl_y=ampl_y,
phase_x=phase_x, phase_y=phase_y,
freq_x=freq_x, freq_y=freq_y,
dtype=dtype,
prime_size=prime_size),
WaterPythonPipeline(batch_size, python_func, dtype=dtype,
prime_size=prime_size),
batch_size=batch_size,
N_iterations=niter,
eps=8)
def test_water_vs_cv():
niter = 3
for device in ['cpu', 'gpu']:
for batch_size in [1, 3]:
for dtype in [types.UINT8, types.FLOAT]:
for prime_size in [False, True]:
yield check_water_vs_cv, device, batch_size, niter, dtype, prime_size
|
DALI-main
|
dali/test/python/operator_2/test_water.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import glob
import numpy
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import os
import random
import tempfile
import time
from PIL import Image, ImageEnhance
from nvidia.dali.ops import _DataNode
from nvidia.dali.pipeline import Pipeline
from nose_utils import raises
from test_utils import get_dali_extra_path
test_data_root = get_dali_extra_path()
images_dir = os.path.join(test_data_root, 'db', 'single', 'jpeg')
def test_dlpack_conversions():
    array = numpy.arange(0, 10, 0.5)
    reshaped = array.reshape((2, 10, 1))
    sliced = reshaped[:, 2:5, :]  # a non-contiguous view exercises stride handling
    dlpack = ops._dlpack_from_array(sliced)
    result_array = ops._dlpack_to_array(dlpack)
    assert result_array.shape == sliced.shape
    assert numpy.array_equal(result_array, sliced)
def resize(image):
return numpy.array(Image.fromarray(image).resize((300, 300)))
class CommonPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id, _seed, image_dir,
prefetch_queue_depth=2):
super().__init__(batch_size, num_threads, device_id, seed=_seed,
prefetch_queue_depth=prefetch_queue_depth)
self.input = ops.readers.File(file_root=image_dir)
self.decode = ops.decoders.Image(device='cpu', output_type=types.RGB)
self.resize = ops.PythonFunction(function=resize, output_layouts='HWC')
def load(self):
jpegs, labels = self.input()
decoded = self.decode(jpegs)
resized = self.resize(decoded)
return resized, labels
def define_graph(self):
pass
class BasicPipeline(CommonPipeline):
def __init__(self, batch_size, num_threads, device_id, seed, image_dir):
super().__init__(batch_size, num_threads, device_id, seed, image_dir)
def define_graph(self):
images, labels = self.load()
return images
class PythonOperatorPipeline(CommonPipeline):
def __init__(self, batch_size, num_threads, device_id, seed, image_dir, function,
prefetch_queue_depth=2):
super().__init__(batch_size, num_threads, device_id, seed, image_dir,
prefetch_queue_depth=prefetch_queue_depth)
self.python_function = ops.PythonFunction(function=function)
def define_graph(self):
images, labels = self.load()
processed = self.python_function(images)
assert isinstance(processed, _DataNode)
return processed
class FlippingPipeline(CommonPipeline):
def __init__(self, batch_size, num_threads, device_id, seed, image_dir):
super().__init__(batch_size, num_threads, device_id, seed, image_dir)
self.flip = ops.Flip(horizontal=1)
def define_graph(self):
images, labels = self.load()
flipped = self.flip(images)
return flipped
class TwoOutputsPythonOperatorPipeline(CommonPipeline):
def __init__(self, batch_size, num_threads, device_id, seed, image_dir, function,
op=ops.PythonFunction):
super().__init__(batch_size, num_threads, device_id, seed, image_dir)
self.python_function = op(function=function, num_outputs=2)
def define_graph(self):
images, labels = self.load()
out1, out2 = self.python_function(images)
assert isinstance(out1, _DataNode)
assert isinstance(out2, _DataNode)
return out1, out2
class MultiInputMultiOutputPipeline(CommonPipeline):
def __init__(self, batch_size, num_threads, device_id, seed, image_dir, function,
batch_processing=False):
super().__init__(batch_size, num_threads, device_id, seed, image_dir)
self.python_function = ops.PythonFunction(function=function, num_outputs=3,
batch_processing=batch_processing)
def define_graph(self):
images1, labels1 = self.load()
images2, labels2 = self.load()
out1, out2, out3 = self.python_function(images1, images2)
assert isinstance(out1, _DataNode)
assert isinstance(out2, _DataNode)
assert isinstance(out3, _DataNode)
return out1, out2, out3
class DoubleLoadPipeline(CommonPipeline):
def __init__(self, batch_size, num_threads, device_id, seed, image_dir):
super().__init__(batch_size, num_threads, device_id, seed, image_dir)
def define_graph(self):
images1, labels1 = self.load()
images2, labels2 = self.load()
return images1, images2
class SinkTestPipeline(CommonPipeline):
def __init__(self, batch_size, device_id, seed, image_dir, function):
super().__init__(batch_size, 1, device_id, seed, image_dir)
self.python_function = ops.PythonFunction(function=function, num_outputs=0)
def define_graph(self):
images, labels = self.load()
self.python_function(images)
return images
class PythonOperatorInputSetsPipeline(PythonOperatorPipeline):
def __init__(self, batch_size, num_threads, device_id, seed, image_dir, function):
super().__init__(batch_size, num_threads, device_id, seed, image_dir, function)
self.python_function = ops.PythonFunction(function=function)
def define_graph(self):
images, labels = self.load()
processed = self.python_function([images, images])
return processed
def random_seed():
return int(random.random() * (1 << 32))
DEVICE_ID = 0
BATCH_SIZE = 8
ITERS = 16
SEED = random_seed()
NUM_WORKERS = 6
def run_case(func):
pipe = BasicPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir)
pyfunc_pipe = PythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir, func)
pipe.build()
pyfunc_pipe.build()
for it in range(ITERS):
preprocessed_output, = pipe.run()
output, = pyfunc_pipe.run()
for i in range(len(output)):
assert numpy.array_equal(output.at(i), func(preprocessed_output.at(i)))
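# Editor's note: run_case drives two pipelines over the same files with the same seed
# and checks, sample by sample, that PythonFunction(func) produces exactly
# func(reference_sample) for every per-sample function under test.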
def one_channel_normalize(image):
return image[:, :, 1] / 255.
def channels_mean(image):
r = numpy.mean(image[:, :, 0])
g = numpy.mean(image[:, :, 1])
b = numpy.mean(image[:, :, 2])
return numpy.array([r, g, b])
def bias(image):
return numpy.array(image > 127, dtype=bool)
def flip(image):
return numpy.fliplr(image)
def flip_batch(images):
return [flip(x) for x in images]
def dlflip(image):
image = ops._dlpack_to_array(image)
out = numpy.fliplr(image)
out = ops._dlpack_from_array(out)
return out
def dlflip_batch(images):
return [dlflip(x) for x in images]
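# dlflip is the DLPack counterpart of `flip`, used with ops.DLTensorPythonFunction:
# ops._dlpack_to_array / ops._dlpack_from_array (DALI test-facing helpers, already used
# above) unwrap the DLPack capsule into a NumPy view and wrap the flipped result back.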
def Rotate(image):
return numpy.rot90(image)
def Brightness(image):
return numpy.array(ImageEnhance.Brightness(Image.fromarray(image)).enhance(0.5))
def test_python_operator_one_channel_normalize():
run_case(one_channel_normalize)
def test_python_operator_channels_mean():
run_case(channels_mean)
def test_python_operator_bias():
run_case(bias)
def test_python_operator_flip():
dali_flip = FlippingPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir)
numpy_flip = PythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir, flip)
dali_flip.build()
numpy_flip.build()
for it in range(ITERS):
numpy_output, = numpy_flip.run()
dali_output, = dali_flip.run()
for i in range(len(numpy_output)):
assert numpy.array_equal(numpy_output.at(i), dali_output.at(i))
class RotatePipeline(CommonPipeline):
def __init__(self, batch_size, num_threads, device_id, seed, image_dir):
super().__init__(batch_size, num_threads, device_id, seed, image_dir)
self.rotate = ops.Rotate(angle=90.0, interp_type=types.INTERP_NN)
def define_graph(self):
images, labels = self.load()
rotate = self.rotate(images)
return rotate
class BrightnessPipeline(CommonPipeline):
def __init__(self, batch_size, num_threads, device_id, seed, image_dir):
super().__init__(batch_size, num_threads, device_id, seed, image_dir)
self.brightness = ops.BrightnessContrast(device="gpu", brightness=0.5)
def define_graph(self):
images, labels = self.load()
bright = self.brightness(images.gpu())
return bright
def test_python_operator_rotate():
dali_rotate = RotatePipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir)
numpy_rotate = PythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir,
Rotate)
dali_rotate.build()
numpy_rotate.build()
for it in range(ITERS):
numpy_output, = numpy_rotate.run()
dali_output, = dali_rotate.run()
for i in range(len(numpy_output)):
if not numpy.array_equal(numpy_output.at(i), dali_output.at(i)):
cv2.imwrite("numpy.png", numpy_output.at(i))
cv2.imwrite("dali.png", dali_output.at(i))
assert numpy.array_equal(numpy_output.at(i), dali_output.at(i))
def test_python_operator_brightness():
dali_brightness = BrightnessPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir)
numpy_brightness = PythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir,
Brightness)
dali_brightness.build()
numpy_brightness.build()
for it in range(ITERS):
numpy_output, = numpy_brightness.run()
dali_output, = dali_brightness.run()
for i in range(len(dali_output)):
assert numpy.allclose(numpy_output.at(i), dali_output.as_cpu().at(i), rtol=1e-5, atol=1)
def invalid_function(image):
    return img  # noqa: F821. Intentionally invalid: `img` is undefined.
@raises(RuntimeError, "img*not defined")
def test_python_operator_invalid_function():
invalid_pipe = PythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir,
invalid_function)
invalid_pipe.build()
invalid_pipe.run()
@raises(TypeError, "do not support multiple input sets")
def test_python_operator_with_input_sets():
invalid_pipe = PythonOperatorInputSetsPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED,
images_dir, Rotate)
invalid_pipe.build()
def split_red_blue(image):
return image[:, :, 0], image[:, :, 2]
def mixed_types(image):
return bias(image), one_channel_normalize(image)
def run_two_outputs(func):
pipe = BasicPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir)
pyfunc_pipe = TwoOutputsPythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED,
images_dir, func)
pipe.build()
pyfunc_pipe.build()
for it in range(ITERS):
preprocessed_output, = pipe.run()
output1, output2 = pyfunc_pipe.run()
for i in range(len(output1)):
pro1, pro2 = func(preprocessed_output.at(i))
assert numpy.array_equal(output1.at(i), pro1)
assert numpy.array_equal(output2.at(i), pro2)
def test_split():
run_two_outputs(split_red_blue)
def test_mixed_types():
run_two_outputs(mixed_types)
def multi_per_sample_compare(func, pipe, pyfunc_pipe):
for it in range(ITERS):
preprocessed_output1, preprocessed_output2 = pipe.run()
out1, out2, out3 = pyfunc_pipe.run()
for i in range(BATCH_SIZE):
pro1, pro2, pro3 = func(preprocessed_output1.at(i), preprocessed_output2.at(i))
assert numpy.array_equal(out1.at(i), pro1)
assert numpy.array_equal(out2.at(i), pro2)
assert numpy.array_equal(out3.at(i), pro3)
def multi_batch_compare(func, pipe, pyfunc_pipe):
for it in range(ITERS):
preprocessed_output1, preprocessed_output2 = pipe.run()
out1, out2, out3 = pyfunc_pipe.run()
in1 = [preprocessed_output1.at(i) for i in range(BATCH_SIZE)]
in2 = [preprocessed_output2.at(i) for i in range(BATCH_SIZE)]
pro1, pro2, pro3 = func(in1, in2)
for i in range(BATCH_SIZE):
assert numpy.array_equal(out1.at(i), pro1[i])
assert numpy.array_equal(out2.at(i), pro2[i])
assert numpy.array_equal(out3.at(i), pro3[i])
def run_multi_input_multi_output(func, compare, batch=False):
pipe = DoubleLoadPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir)
pyfunc_pipe = MultiInputMultiOutputPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED,
images_dir, func, batch_processing=batch)
pipe.build()
pyfunc_pipe.build()
compare(func, pipe, pyfunc_pipe)
def split_and_mix(images1, images2):
r = (images1[:, :, 0] + images2[:, :, 0]) // 2
g = (images1[:, :, 1] + images2[:, :, 1]) // 2
b = (images1[:, :, 2] + images2[:, :, 2]) // 2
return r, g, b
def output_with_stride_mixed_types(images1, images2):
return images1[:, :, 2], one_channel_normalize(images2), images1 > images2
def test_split_and_mix():
run_multi_input_multi_output(split_and_mix, multi_per_sample_compare)
def test_output_with_stride_mixed_types():
run_multi_input_multi_output(output_with_stride_mixed_types, multi_per_sample_compare)
def mix_and_split_batch(images1, images2):
mixed = [(images1[i] + images2[i]) // 2 for i in range(len(images1))]
r = [im[:, :, 0] for im in mixed]
g = [im[:, :, 1] for im in mixed]
b = [im[:, :, 2] for im in mixed]
return r, g, b
def with_stride_mixed_types_batch(images1, images2):
out1 = [im[:, :, 2] for im in images1]
out2 = [one_channel_normalize(im) for im in images2]
out3 = [im1 > im2 for (im1, im2) in zip(images1, images2)]
return out1, out2, out3
def test_split_and_mix_batch():
run_multi_input_multi_output(mix_and_split_batch, multi_batch_compare, batch=True)
def test_output_with_stride_mixed_types_batch():
run_multi_input_multi_output(with_stride_mixed_types_batch, multi_batch_compare, batch=True)
@raises(Exception, "must be a tuple")
def test_not_a_tuple():
invalid_pipe = TwoOutputsPythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED,
images_dir, flip_batch)
invalid_pipe.build()
invalid_pipe.run()
@raises(Exception, "must be a tuple")
def test_not_a_tuple_dl():
invalid_pipe = TwoOutputsPythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED,
images_dir, dlflip_batch,
op=ops.DLTensorPythonFunction)
invalid_pipe.build()
invalid_pipe.run()
def three_outputs(inp):
return inp, inp, inp
@raises(Exception, glob="Unexpected number of outputs*got 3*expected 2")
def test_wrong_outputs_number():
invalid_pipe = TwoOutputsPythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED,
images_dir, three_outputs)
invalid_pipe.build()
invalid_pipe.run()
@raises(Exception, glob="Unexpected number of outputs*got 3*expected 2")
def test_wrong_outputs_number_dl():
invalid_pipe = TwoOutputsPythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED,
images_dir, three_outputs,
op=ops.DLTensorPythonFunction)
invalid_pipe.build()
invalid_pipe.run()
SINK_PATH = tempfile.mkdtemp()
def save(image):
Image.fromarray(image).save(SINK_PATH + '/sink_img' + str(time.process_time()) + '.jpg', 'JPEG')
def test_sink():
pipe = SinkTestPipeline(BATCH_SIZE, DEVICE_ID, SEED, images_dir, save)
pipe.build()
if not os.path.exists(SINK_PATH):
os.mkdir(SINK_PATH)
assert len(glob.glob(SINK_PATH + '/sink_img*')) == 0
pipe.run()
created_files = glob.glob(SINK_PATH + '/sink_img*')
print(created_files)
assert len(created_files) == BATCH_SIZE
for file in created_files:
os.remove(file)
os.rmdir(SINK_PATH)
counter = 0
def func_with_side_effects(images):
global counter
counter = counter + 1
return numpy.full_like(images, counter)
def test_func_with_side_effects():
pipe_one = PythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir,
func_with_side_effects, prefetch_queue_depth=1)
pipe_two = PythonOperatorPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED, images_dir,
func_with_side_effects, prefetch_queue_depth=1)
pipe_one.build()
pipe_two.build()
global counter
for it in range(ITERS):
counter = 0
out_one, = pipe_one.run()
out_two, = pipe_two.run()
assert counter == len(out_one) + len(out_two)
elems_one = [out_one.at(s)[0][0][0] for s in range(BATCH_SIZE)]
elems_one.sort()
assert elems_one == [i for i in range(1, BATCH_SIZE + 1)]
elems_two = [out_two.at(s)[0][0][0] for s in range(BATCH_SIZE)]
elems_two.sort()
assert elems_two == [i for i in range(BATCH_SIZE + 1, 2 * BATCH_SIZE + 1)]
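# The assertions above encode an ordering guarantee: with prefetch_queue_depth=1 and
# serialized PythonFunction callbacks, pipe_one's samples observe counter values
# 1..BATCH_SIZE and pipe_two's observe BATCH_SIZE+1..2*BATCH_SIZE.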
class AsyncPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id, _seed):
super().__init__(batch_size, num_threads, device_id, seed=_seed, exec_async=True,
exec_pipelined=True)
self.op = ops.PythonFunction(function=lambda: numpy.zeros([2, 2, 2]))
def define_graph(self):
return self.op()
def test_output_layout():
pipe = CommonPipeline(1, 1, 0, 999, images_dir)
with pipe:
images, _ = pipe.load()
out1, out2 = fn.python_function(images, function=lambda x: (x, x.mean(2)), num_outputs=2,
output_layouts=['ABC', 'DE'])
out3, out4 = fn.python_function(images, function=lambda x: (x, x / 2), num_outputs=2,
output_layouts='FGH')
out5, out6 = fn.python_function(images, function=lambda x: (x, x / 2), num_outputs=2,
output_layouts=['IJK'])
pipe.set_outputs(out1, out2, out3, out4, out5, out6)
pipe.build()
out1, out2, out3, out4, out5, out6 = pipe.run()
assert out1.layout() == 'ABC'
assert out2.layout() == 'DE'
assert out3.layout() == 'FGH'
assert out4.layout() == 'FGH'
assert out5.layout() == 'IJK'
assert out6.layout() == ''
@raises(RuntimeError, "*length of*output_layouts*greater than*")
def test_invalid_layouts_arg():
pipe = Pipeline(1, 1, 0, 999, exec_async=False, exec_pipelined=False)
with pipe:
out = fn.python_function(function=lambda: numpy.zeros((1, 1)), output_layouts=['HW', 'HWC'])
pipe.set_outputs(out)
pipe.build()
pipe.run()
def test_python_function_conditionals():
batch_size = 32
@pipeline_def(device_id=0, batch_size=batch_size, num_threads=4, exec_async=False,
exec_pipelined=False, enable_conditionals=True)
def py_fun_pipeline():
predicate = fn.external_source(
source=lambda sample_info: numpy.array(sample_info.idx_in_batch < batch_size / 2),
batch=False)
if predicate:
out1, out2 = fn.python_function(predicate, num_outputs=2,
function=lambda _: (numpy.array(42), numpy.array(10)))
else:
out1 = fn.python_function(function=lambda: numpy.array(0))
out2 = types.Constant(numpy.array(0), device="cpu", dtype=types.INT64)
return out1, out2
pipe = py_fun_pipeline()
pipe.build()
pipe.run()
|
DALI-main
|
dali/test/python/operator_2/test_python_function.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali.fn as fn
from nvidia.dali.pipeline import Pipeline
from nose.tools import nottest
import numpy as np
from test_utils import np_type_to_dali
from nose_utils import assert_raises
class Batch:
def __init__(self, data_type):
self._data_type = data_type
self._index = 0
def __call__(self):
batch = self._data[self._index]
self._index = (self._index + 1) % self.batch_size()
return batch
def batch_size(self):
return len(self._data[0])
def num_iter(self):
return 2 * len(self._data)
def reset(self):
self._index = 0
class Batch1D(Batch):
def __init__(self, data_type):
super().__init__(data_type)
self._data = [
[
np.array([1, 2, 3, 4], dtype=self._data_type),
np.array([33, 2, 10, 10], dtype=self._data_type)
], [
np.array([10, 20, 30, 20], dtype=self._data_type),
np.array([33, 2, 15, 19], dtype=self._data_type)
]]
def valid_axes(self):
return [None, (), 0]
class Batch2D(Batch):
def __init__(self, data_type):
super().__init__(data_type)
self._data = [
[
np.array([[1, 0, 2], [3, 1, 4]], dtype=self._data_type),
np.array([[5, 0, 6], [7, 0, 8]], dtype=self._data_type)
], [
np.array([[13, 23, 22], [23, 21, 14]], dtype=self._data_type),
np.array([[23, 3, 6], [7, 0, 20]], dtype=self._data_type)
]]
def valid_axes(self):
return [None, (), 0, 1, (0, 1)]
class Batch3D(Batch):
def __init__(self, data_type):
super().__init__(data_type)
self._data = [
[
np.array([[[1, 0, 1], [2, 3, 1]], [[0, 4, 1], [0, 4, 1]]], dtype=self._data_type),
np.array([[[5, 0, 1], [6, 7, 1]], [[0, 8, 1], [0, 4, 1]]], dtype=self._data_type)
], [
np.array([[[9, 0, 3], [3, 3, 3]], [[7, 0, 3], [0, 6, 8]]], dtype=self._data_type),
np.array([[[7, 2, 3], [7, 8, 2]], [[3, 9, 2], [2, 6, 2]]], dtype=self._data_type)
]]
def valid_axes(self):
return [None, (), 0, 1, 2, (0, 1), (0, 2), (1, 2), (0, 1, 2)]
class Batch3DOverflow(Batch3D):
def __init__(self, data_type):
super().__init__(data_type)
for batch in self._data:
for sample in batch:
sample *= 100000
class Batch3DNegativeAxes(Batch3D):
def valid_axes(self):
return [-3, -2, -1, (-3, 1), (0, -1), (-2, 2), (-3, -2, -1)]
def get_expected_layout(in_layout, axes, keep_dims):
in_layout = in_layout or ""
if keep_dims or not in_layout:
return in_layout
if axes is None:
return ""
if isinstance(axes, int):
axes = [axes]
ndim = len(in_layout)
axes = [(axis + ndim) % ndim for axis in axes]
return "".join(c for i, c in enumerate(in_layout) if i not in axes)
def check_layout(tensor, in_layout, axes, keep_dims):
expected_layout = get_expected_layout(in_layout, axes, keep_dims)
assert tensor.layout() == expected_layout, \
f"Layout mismatch. Got: `{tensor.layout()}`, expected `{expected_layout}` (axes: {axes})"
def run_dali(reduce_fn, batch_fn, keep_dims, axes, output_type, add_mean_input=False, ddof=0,
layout=None):
batch_size = batch_fn.batch_size()
    # Needed due to how the ExternalSource API works: it rejects bound methods and partials.
def get_batch():
return batch_fn()
result_cpu = []
result_gpu = []
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
args = {'keep_dims': keep_dims, 'axes': axes}
if output_type is not None:
args['dtype'] = np_type_to_dali(output_type)
with pipe:
input = fn.external_source(source=get_batch, layout=layout)
if not add_mean_input:
reduced_cpu = reduce_fn(input, **args)
reduced_gpu = reduce_fn(input.gpu(), **args)
else:
mean = fn.reductions.mean(input, **args)
args['ddof'] = ddof
reduced_cpu = reduce_fn(input, mean, **args)
reduced_gpu = reduce_fn(input.gpu(), mean.gpu(), **args)
pipe.set_outputs(reduced_cpu, reduced_gpu)
pipe.build()
for _ in range(batch_fn.num_iter()):
output = pipe.run()
check_layout(output[0], layout, axes, keep_dims)
check_layout(output[1], layout, axes, keep_dims)
reduced_cpu = output[0].as_array()
reduced_gpu = output[1].as_cpu().as_array()
result_cpu.append(reduced_cpu)
result_gpu.append(reduced_gpu)
return result_cpu, result_gpu
def run_numpy(reduce_fn, batch_fn, keep_dims, axes, output_type, ddof=None):
result = []
args = {'keepdims': keep_dims, 'axis': axes}
if output_type is not None:
args['dtype'] = output_type
if ddof is not None:
args['ddof'] = ddof
for _ in range(batch_fn.num_iter()):
batch = batch_fn()
sample_result = []
for sample in batch:
sample_reduced = reduce_fn(sample, **args)
sample_result.append(sample_reduced)
result.append(sample_result)
return result
def compare(dali_res, np_res):
for dali_sample, np_sample in zip(dali_res, np_res):
assert dali_sample.shape == np_sample.shape
if dali_res[0].dtype == np.float32:
assert np.allclose(dali_sample, np_sample)
else:
if not np.array_equal(dali_sample, np_sample):
print(dali_sample)
print(np_sample)
assert np.array_equal(dali_sample, np_sample)
def np_mean_square(input, keepdims=False, axis=None, dtype=None):
return np.mean(np.square(input), keepdims=keepdims, axis=axis, dtype=dtype)
def np_root_mean_square(input, keepdims=False, axis=None, dtype=None):
return np.sqrt(np_mean_square(input, keepdims=keepdims, axis=axis, dtype=dtype))
reduce_fns = {
"sum": (fn.reductions.sum, np.sum),
"min": (fn.reductions.min, np.min),
"max": (fn.reductions.max, np.max),
"mean": (fn.reductions.mean, np.mean),
"mean_square": (fn.reductions.mean_square, np_mean_square),
"rms": (fn.reductions.rms, np_root_mean_square),
"std_dev": (fn.reductions.std_dev, np.std),
"variance": (fn.reductions.variance, np.var),
}
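# Each entry pairs a DALI reduction with its NumPy reference implementation, so the
# runners below can do, e.g., dali_fn, np_fn = reduce_fns["variance"] and compare
# both on identical batches.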
def run_reduce(keep_dims, reduction_name, batch_gen, input_type, output_type=None, layout=None):
batch_fn = batch_gen(input_type)
dali_reduce_fn, numpy_reduce_fn = reduce_fns[reduction_name]
for axes in batch_fn.valid_axes():
dali_res_cpu, dali_res_gpu = run_dali(
dali_reduce_fn, batch_fn, keep_dims=keep_dims, axes=axes, output_type=output_type,
layout=layout)
batch_fn.reset()
np_res = run_numpy(
numpy_reduce_fn, batch_fn, keep_dims=keep_dims, axes=axes, output_type=output_type)
for iteration in range(batch_fn.num_iter()):
compare(dali_res_cpu[iteration], np_res[iteration])
compare(dali_res_gpu[iteration], np_res[iteration])
def test_reduce():
reductions = ["sum", "min", "max"]
batch_gens = [Batch1D, Batch2D, Batch3D]
types = [
np.uint8, np.int8,
np.uint16, np.int16,
np.uint32, np.int32,
np.uint64, np.int64,
np.float32
]
rng = np.random.default_rng(1000)
for keep_dims in [False, True]:
for reduction_name in reductions:
for ndim, batch_gen in enumerate(batch_gens, start=1):
type_id = rng.choice(types)
layout = rng.choice([None, "XYZ"[:ndim]])
yield run_reduce, keep_dims, reduction_name, batch_gen, type_id, None, layout
def test_reduce_negative_axes():
reductions = ["sum", "max"]
type = np.uint8
for layout in ["FGH", None]:
for keep_dims in [False, True]:
for reduction_name in reductions:
yield run_reduce, keep_dims, reduction_name, Batch3DNegativeAxes, type, None, layout
def test_reduce_invalid_axes():
class Batch3DInvalidAxes(Batch3D):
def valid_axes(self): # Invalid axes
return [-100, (100, 0)]
batch_fn = Batch3DInvalidAxes(np.uint8)
dali_reduce_fn, numpy_reduce_fn = reduce_fns["sum"]
for axes in batch_fn.valid_axes():
with assert_raises(RuntimeError, glob="Axis index out of range"):
dali_res_cpu, dali_res_gpu = run_dali(
dali_reduce_fn, batch_fn, keep_dims=False, axes=axes, output_type=np.uint8)
def test_reduce_with_promotion():
reductions = ["rms", "mean_square"]
batch_gens = [Batch3D]
types = [np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.float32]
rng = np.random.default_rng(1041)
for keep_dims in [False, True]:
for reduction_name in reductions:
for batch_gen in batch_gens:
for type_id in types:
layout = rng.choice([None, "ABC"])
yield run_reduce, keep_dims, reduction_name, batch_gen, type_id, None, layout
def test_reduce_with_promotion_with_overflow():
reductions = ["sum", "mean"]
batch_gens = [Batch3DOverflow]
types = [np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.float32]
rng = np.random.default_rng(1042)
for keep_dims in [False, True]:
for reduction_name in reductions:
for batch_gen in batch_gens:
for type_id in types:
layout = rng.choice([None, "ABC"])
yield run_reduce, keep_dims, reduction_name, batch_gen, type_id, None, layout
def test_sum_with_output_type():
reductions = ["sum"]
batch_gens = [Batch3DOverflow]
types = [
(np.uint8, [np.uint64, np.float32]),
(np.int8, [np.int64, np.float32]),
(np.uint16, [np.uint64, np.float32]),
(np.int16, [np.int64, np.float32]),
(np.uint32, [np.uint64, np.float32]),
(np.int32, [np.int32, np.int64, np.float32])]
rng = np.random.default_rng(1043)
for reduction_name in reductions:
for batch_gen in batch_gens:
for type_map in types:
input_type = type_map[0]
keep_dims = np.random.choice([False, True])
for output_type in type_map[1]:
layout = rng.choice([None, "RGB"])
yield run_reduce, \
keep_dims, reduction_name, batch_gen, input_type, output_type, layout
def run_reduce_with_mean_input(keep_dims, reduction_name, batch_gen, input_type, output_type=None,
layout=None):
batch_fn = batch_gen(input_type)
dali_reduce_fn, numpy_reduce_fn = reduce_fns[reduction_name]
for axes in batch_fn.valid_axes():
if axes == ():
valid_ddofs = [0]
elif axes is None:
valid_ddofs = [0, 1, 2, 3]
else:
valid_ddofs = [0, 1]
for ddof in valid_ddofs:
dali_res_cpu, dali_res_gpu = run_dali(
dali_reduce_fn, batch_fn, keep_dims=keep_dims, axes=axes, output_type=output_type,
add_mean_input=True, ddof=ddof, layout=layout)
batch_fn.reset()
np_res = run_numpy(
numpy_reduce_fn, batch_fn, keep_dims=keep_dims, axes=axes,
output_type=output_type, ddof=ddof)
for iteration in range(batch_fn.num_iter()):
compare(dali_res_cpu[iteration], np_res[iteration])
compare(dali_res_gpu[iteration], np_res[iteration])
def test_reduce_with_mean_input():
reductions = ["std_dev", "variance"]
batch_gens = [Batch1D, Batch2D, Batch3D]
types = [
np.uint8, np.int8,
np.uint16, np.int16,
np.uint32, np.int32,
np.uint64, np.int64,
np.float32
]
rng = np.random.default_rng(1044)
for keep_dims in [False, True]:
for reduction_name in reductions:
for ndim, batch_gen in enumerate(batch_gens, start=1):
type_id = np.random.choice(types)
layout = rng.choice([None, "CDE"[:ndim]])
yield run_reduce_with_mean_input, keep_dims, reduction_name, batch_gen, \
type_id, None, layout
def run_and_compare_with_layout(batch_gen, pipe):
for _ in range(batch_gen.num_iter()):
output = pipe.run()
assert output[0].layout() == output[1].layout(), \
f"{output[0].layout()} vs {output[1].layout()}"
reduced = output[0].as_array()
reduced_by_name = output[1].as_array()
assert np.array_equal(reduced, reduced_by_name)
def run_reduce_with_layout(batch_size, get_batch, reduction, axes, axis_names, batch_fn):
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
with pipe:
input = fn.external_source(source=get_batch, layout="ABC")
reduced = reduction(input, keep_dims=False, axes=axes)
reduced_by_name = reduction(input, keep_dims=False, axis_names=axis_names)
pipe.set_outputs(reduced, reduced_by_name)
pipe.build()
run_and_compare_with_layout(batch_fn, pipe)
def run_reduce_with_layout_with_mean_input(batch_size, get_batch, reduction, axes,
axis_names, batch_fn):
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
with pipe:
input = fn.external_source(source=get_batch, layout="ABC")
mean = fn.reductions.mean(input, axes=axes)
reduced = reduction(input, mean, keep_dims=False, axes=axes)
reduced_by_name = reduction(input, mean, keep_dims=False, axis_names=axis_names)
pipe.set_outputs(reduced, reduced_by_name)
pipe.build()
run_and_compare_with_layout(batch_fn, pipe)
def test_reduce_axis_names():
reductions = [
fn.reductions.max,
fn.reductions.min,
fn.reductions.mean,
fn.reductions.mean_square,
fn.reductions.sum,
fn.reductions.rms]
reductions_with_mean_input = [
fn.reductions.std_dev, fn.reductions.variance]
batch_fn = Batch3D(np.float32)
batch_size = batch_fn.batch_size()
def get_batch():
return batch_fn()
axes_and_names = [
((), ''),
(0, 'A'),
(1, 'B'),
(2, 'C'),
((0, 1), 'AB'),
((0, 2), 'AC'),
((1, 2), 'BC'),
((0, 1, 2), 'ABC')]
for axes, axis_names in axes_and_names:
for reduction in reductions:
yield run_reduce_with_layout, batch_size, get_batch, reduction, axes, \
axis_names, batch_fn
for reduction in reductions_with_mean_input:
yield run_reduce_with_layout_with_mean_input, batch_size, get_batch, reduction, \
axes, axis_names, batch_fn
_random_buf = None
_random_lo = 0
_random_hi = 1
def fast_large_random_batches(rank, batch_size, num_batches, lo=0, hi=1):
max_vol = 10000000
max_extent = min(65536, int(np.floor(max_vol**(1/rank))))
# generate a maximum size buffer pre-filled with random numbers
global _random_buf
global _random_lo
global _random_hi
should_generate = _random_buf is None or _random_buf.size < max_extent**rank \
or _random_lo != lo or _random_hi != hi
if should_generate:
_random_lo = lo
_random_hi = hi
_random_buf = np.random.uniform(low=lo, high=hi, size=max_vol).astype(np.float32)
data = []
for _ in range(num_batches):
batch = []
for _ in range(batch_size):
size = np.random.randint(1, max_extent, size=rank)
vol = np.prod(size)
# now that we know the actual volume of the sample, we can pick a random
# location in the pre-filled buffer
offset = np.random.randint(0, (_random_buf.size - vol) + 1)
# take a slice and reshape it to the desired shape - these are constant time operations
sample = _random_buf[offset:offset+vol].reshape(size)
batch.append(sample)
data.append(batch)
return data
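# Design note: pre-filling one large random buffer and slicing random windows out of
# it makes sample generation effectively constant-time; calling np.random.uniform per
# sample (up to max_vol elements each) would dominate the runtime of these tests.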
@nottest
def _test_reduce_large_data(rank, axes, device, in_layout):
batch_size = 16
num_batches = 2
data = fast_large_random_batches(rank, batch_size, num_batches)
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0 if device == 'gpu' else None)
input = fn.external_source(data, cycle=True, device=device, layout=in_layout)
reduced = fn.reductions.sum(input, axes=axes)
pipe.set_outputs(reduced)
pipe.build()
for b, batch in enumerate(data):
out, = pipe.run()
check_layout(out, in_layout, axes, False)
if device == 'gpu':
out = out.as_cpu()
for i in range(batch_size):
ref = np.sum(batch[i].astype(np.float64), axis=axes)
assert np.allclose(out[i], ref, 1e-5, 1e-5)
def test_reduce_large_data():
np.random.seed(12344)
for device in ['cpu', 'gpu']:
for rank in [1, 2, 3]:
for axis_mask in range(1, 2**rank):
layout = np.random.choice([None, "DALI"[:rank]])
axes = tuple(filter(lambda x: x >= 0,
(i if axis_mask & (1 << i) else -1 for i in range(rank))))
yield _test_reduce_large_data, rank, axes, device, layout
@nottest
def _test_std_dev_large_data(rank, axes, device, in_layout):
batch_size = 16
num_batches = 2
data = fast_large_random_batches(rank, batch_size, num_batches)
pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0 if device == 'gpu' else None)
input = fn.external_source(data, cycle=True, device=device, layout=in_layout)
mean = fn.reductions.mean(input, axes=axes)
reduced = fn.reductions.std_dev(input, mean, axes=axes, ddof=0)
pipe.set_outputs(reduced)
pipe.build()
for b, batch in enumerate(data):
out, = pipe.run()
check_layout(out, in_layout, axes, False)
if device == 'gpu':
out = out.as_cpu()
for i in range(batch_size):
ref = np.std(batch[i].astype(np.float64), axis=axes, ddof=0)
assert np.allclose(out[i], ref, 1e-5, 1e-5)
def test_std_dev_large_data():
np.random.seed(12344)
for device in ['cpu', 'gpu']:
for rank in [1, 2, 3, 4]:
for axis_mask in range(1, 2**rank):
layout = np.random.choice([None, "DALI"[:rank]])
axes = tuple(filter(lambda x: x >= 0,
(i if axis_mask & (1 << i) else -1 for i in range(rank))))
yield _test_std_dev_large_data, rank, axes, device, layout
|
DALI-main
|
dali/test/python/operator_2/test_reduce.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import numpy as np
from functools import partial
from test_utils import compare_pipelines
from test_utils import RandomDataIterator
from nose_utils import raises
class ToDecibelsPipeline(Pipeline):
def __init__(self, device, batch_size, iterator, multiplier, reference, cutoff_db,
num_threads=1, device_id=0):
super(ToDecibelsPipeline, self).__init__(batch_size, num_threads, device_id)
self.device = device
self.iterator = iterator
self.inputs = ops.ExternalSource()
self.dB = ops.ToDecibels(device=self.device,
multiplier=multiplier,
reference=reference,
cutoff_db=cutoff_db)
def define_graph(self):
self.data = self.inputs()
out = self.data.gpu() if self.device == 'gpu' else self.data
out = self.dB(out)
return out
def iter_setup(self):
data = self.iterator.next()
self.feed_input(self.data, data)
def to_db_func(multiplier, reference, cutoff_db, input_data):
if not reference:
reference = np.amax(input_data)
min_ratio = 10 ** (cutoff_db / multiplier)
out = multiplier * np.log10(np.maximum(min_ratio, input_data / reference))
return out
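# Worked example (illustrative values): with multiplier=10, reference=1 and
# cutoff_db=-80, min_ratio = 10 ** (-80 / 10) = 1e-8, so an input of 0.01 maps to
# 10 * log10(0.01) = -20 dB, and anything below 1e-8 saturates at the -80 dB cutoff.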
class ToDecibelsPythonPipeline(Pipeline):
def __init__(self, device, batch_size, iterator, multiplier, reference, cutoff_db,
num_threads=1, device_id=0, func=to_db_func):
super(ToDecibelsPythonPipeline, self).__init__(
batch_size, num_threads, device_id,
seed=12345, exec_async=False, exec_pipelined=False)
self.device = "cpu"
self.iterator = iterator
self.inputs = ops.ExternalSource()
function = partial(func, multiplier, reference, cutoff_db)
self.dB = ops.PythonFunction(function=function)
def define_graph(self):
self.data = self.inputs()
out = self.dB(self.data)
return out
def iter_setup(self):
data = self.iterator.next()
self.feed_input(self.data, data)
def check_operator_to_decibels_vs_python(device, batch_size, input_shape,
multiplier, reference, cutoff_db):
eii1 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
eii2 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
compare_pipelines(
ToDecibelsPipeline(
device, batch_size, iter(eii1),
multiplier=multiplier, reference=reference, cutoff_db=cutoff_db),
ToDecibelsPythonPipeline(
device, batch_size, iter(eii2),
multiplier=multiplier, reference=reference, cutoff_db=cutoff_db),
batch_size=batch_size, N_iterations=3, eps=1e-04)
def test_operator_to_decibels_vs_python():
for device in ['cpu', 'gpu']:
for batch_size in [3]:
for multiplier, reference, cutoff_db, shape in [(10.0, None, -80.0, (1, 4096)),
(20.0, 1.0, -200.0, (2, 1000)),
(20.0, 1e-6, -120.0, (2, 3, 40))]:
yield check_operator_to_decibels_vs_python, device, batch_size, shape, \
multiplier, reference, cutoff_db
class NaturalLogarithmPipeline(Pipeline):
def __init__(self, device, iterator, batch_size, num_threads=1, exec_async=True,
exec_pipelined=True):
super(NaturalLogarithmPipeline, self).__init__(batch_size, num_threads, device_id=0,
seed=42, exec_async=exec_async,
exec_pipelined=exec_pipelined)
self.device = device
self.inputs = ops.ExternalSource()
self.iterator = iterator
self.log = None
def define_graph(self):
if self.log is None:
raise RuntimeError(
"Error: you need to derive from this class and define `self.log` operator")
self.data = self.inputs()
data = self.data.gpu() if self.device == 'gpu' else self.data
out = self.log(data + 1)
return out
def iter_setup(self):
data = self.iterator.next()
self.feed_input(self.data, data)
class NLDaliPipeline(NaturalLogarithmPipeline):
def __init__(self, device, iterator, batch_size, reference=1.0):
super().__init__(device, iterator, batch_size)
self.log = ops.ToDecibels(device=device, multiplier=np.log(10), reference=reference)
def log_tensor(tensor):
return np.log(tensor)
class NLPythonPipeline(NaturalLogarithmPipeline):
def __init__(self, iterator, batch_size):
super().__init__('cpu', iterator, batch_size,
exec_async=False, exec_pipelined=False)
function = partial(log_tensor)
self.log = ops.PythonFunction(function=function)
def check_natural_logarithm(device, batch_size, input_shape):
eii1 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
eii2 = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
compare_pipelines(
NLDaliPipeline(device, iter(eii1), batch_size),
NLPythonPipeline(iter(eii2), batch_size),
batch_size=batch_size, N_iterations=3, eps=1e-04)
def test_operator_natural_logarithm():
shapes = [(1, 4096), (2, 1000), (2, 3, 40)]
batch_size = 3
for device in ['cpu', 'gpu']:
for sh in shapes:
yield check_natural_logarithm, device, batch_size, sh
@raises(RuntimeError, glob="`reference` argument can't be zero")
def test_invalid_reference():
NLDaliPipeline('cpu', None, 1, reference=0.0).build()
|
DALI-main
|
dali/test/python/operator_2/test_to_decibels.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose_utils import assert_raises
import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import random
from segmentation_test_utils import make_batch_select_masks
random.seed(1234)
np.random.seed(4321)
def check_select_masks(batch_size,
npolygons_range=(1, 10),
nvertices_range=(3, 40),
vertex_ndim=2,
vertex_dtype=np.float32,
reindex_masks=False):
def get_data_source(*args, **kwargs):
return lambda: make_batch_select_masks(*args, **kwargs)
pipe = dali.pipeline.Pipeline(batch_size=batch_size, num_threads=4, device_id=0, seed=1234)
with pipe:
polygons, vertices, mask_ids = fn.external_source(
source=get_data_source(batch_size,
npolygons_range=npolygons_range,
nvertices_range=nvertices_range,
vertex_ndim=vertex_ndim,
vertex_dtype=vertex_dtype),
num_outputs=3, device='cpu'
)
out_polygons, out_vertices = fn.segmentation.select_masks(
mask_ids, polygons, vertices, reindex_masks=reindex_masks
)
pipe.set_outputs(polygons, vertices, mask_ids, out_polygons, out_vertices)
pipe.build()
    for _ in range(3):
outputs = pipe.run()
for idx in range(batch_size):
in_polygons = outputs[0].at(idx)
in_vertices = outputs[1].at(idx)
mask_ids = outputs[2].at(idx)
out_polygons = outputs[3].at(idx)
out_vertices = outputs[4].at(idx)
in_polygons_dict = {}
for k in range(in_polygons.shape[0]):
mask_id = in_polygons[k, 0]
in_polygons_dict[mask_id] = (in_polygons[k, 1], in_polygons[k, 2])
if reindex_masks:
index_map = {}
                for i in range(len(mask_ids)):
                    index_map[mask_ids[i]] = i
vertex_count = 0
for m in range(len(mask_ids)):
mask_id = mask_ids[m]
in_vertex_start, in_vertex_end = in_polygons_dict[mask_id]
in_nvertices = in_vertex_end - in_vertex_start
expected_out_mask_id = index_map[mask_id] if reindex_masks else mask_id
out_mask_id, out_vertex_start, out_vertex_end = out_polygons[m]
assert out_mask_id == expected_out_mask_id
assert out_vertex_start == vertex_count
assert out_vertex_end == (vertex_count + in_nvertices)
vertex_count = vertex_count + in_nvertices
expected_out_vertex = in_vertices[in_vertex_start:in_vertex_end]
out_vertex = out_vertices[out_vertex_start:out_vertex_end]
assert (expected_out_vertex == out_vertex).all()
def test_select_masks():
npolygons_range = (1, 10)
nvertices_range = (3, 40)
for batch_size in [1, 3]:
for vertex_ndim in [2, 3, 6]:
            # np.float (removed in NumPy 1.24) replaced with the equivalent explicit dtype
            for vertex_dtype in [np.float64,
                                 random.choice([np.int8, np.int16, np.int32, np.int64])]:
reindex_masks = random.choice([False, True])
yield (check_select_masks,
batch_size,
npolygons_range,
nvertices_range,
vertex_ndim,
vertex_dtype,
reindex_masks)
@dali.pipeline_def(batch_size=1, num_threads=4, device_id=0, seed=1234)
def wrong_input_pipe(data_source_fn, reindex_masks=False):
polygons, vertices, mask_ids = fn.external_source(source=data_source_fn, num_outputs=3,
device='cpu')
out_polygons, out_vertices = fn.segmentation.select_masks(mask_ids, polygons, vertices,
reindex_masks=reindex_masks)
return polygons, vertices, mask_ids, out_polygons, out_vertices
def _test_select_masks_wrong_input(data_source_fn, err_regex):
p = wrong_input_pipe(data_source_fn=data_source_fn)
p.build()
with assert_raises(RuntimeError, regex=err_regex):
_ = p.run()
def test_select_masks_wrong_mask_ids():
def test_data():
polygons = [np.array([[0, 0, 2], [1, 3, 5], [2, 6, 8]], dtype=np.int32)]
vertices = [np.array(np.random.rand(9, 2), dtype=np.float32)]
mask_ids = [np.array([10, 11], dtype=np.int32)] # out of bounds ids
return polygons, vertices, mask_ids
_test_select_masks_wrong_input(lambda: test_data(),
err_regex="Selected mask_id .* is not present in the input\\.")
def test_select_masks_wrong_mask_meta_dim():
def test_data():
# Expects 3 integers, not 4
polygons = [np.array([[0, 0, 2, -1], [1, 3, 5, -1], [2, 6, 8, -1]], dtype=np.int32)]
vertices = [np.array(np.random.rand(9, 2), dtype=np.float32)]
mask_ids = [np.array([0], dtype=np.int32)]
return polygons, vertices, mask_ids
_test_select_masks_wrong_input(
lambda: test_data(),
err_regex="``polygons`` is expected to contain 2D tensors with 3 columns: "
"``mask_id, start_idx, end_idx``\\. Got \\d* columns\\.")
def test_select_masks_wrong_vertex_ids():
def test_data():
polygons = [np.array([[0, 0, 20]], dtype=np.int32)] # Out of bounds vertex index
vertices = [np.array(np.random.rand(3, 2), dtype=np.float32)] # Only 3 vertices
mask_ids = [np.array([0], dtype=np.int32)]
return polygons, vertices, mask_ids
_test_select_masks_wrong_input(
lambda: test_data(),
err_regex="Vertex index range for mask id .* is out of bounds\\. "
"Expected to be within the range of available vertices .*\\.")
|
DALI-main
|
dali/test/python/operator_2/test_segmentation_select_masks.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import pipeline_def, Pipeline
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import numpy as np
from test_utils import check_batch, RandomlyShapedDataIterator
from nose_utils import assert_raises
from nose2.tools import params
test_iters = 4
def to_batch(tl, batch_size):
return [np.array(tl[i]) for i in range(batch_size)]
@pipeline_def
def rotate_pipe(dev):
input = fn.external_source(name="input", device=dev)
return fn.rotate(input, angle=15)
@pipeline_def
def flip_pipe(dev):
input = fn.external_source(name="input", device=dev)
return fn.flip(input, horizontal=True)
@pipeline_def
def conditional_split_merge_pipe(dev):
input = fn.external_source(name="input", device=dev)
pred = fn.external_source(name="predicate")
true_branch, false_branch = fn._conditional.split(input, predicate=pred)
true_rotated = fn.rotate(true_branch, angle=15)
false_flipped = fn.flip(false_branch, horizontal=True)
return fn._conditional.merge(true_rotated, false_flipped, predicate=pred)
def check_conditional_split_merge(dev, pred_gen):
bs = 10
kwargs = {
"batch_size": bs,
"num_threads": 4,
"device_id": 0,
"prefetch_queue_depth": 1 # so that it's easier to use external source
}
pipe_sm = conditional_split_merge_pipe(dev, **kwargs)
pipe_true = rotate_pipe(dev, **kwargs)
pipe_false = flip_pipe(dev, **kwargs)
pipe_sm.build()
pipe_true.build()
pipe_false.build()
data_iter = RandomlyShapedDataIterator(bs, min_shape=(20, 20, 3), max_shape=(40, 30, 3))
data_iter = iter(data_iter)
for _ in range(test_iters):
predicate = [pred_gen(i) for i in range(bs)]
data = next(data_iter)
data_true = [data[i] for i in range(bs) if predicate[i]]
data_false = [data[i] for i in range(bs) if not predicate[i]]
pipe_sm.feed_input("input", data)
pipe_sm.feed_input("predicate", predicate)
if data_true:
pipe_true.feed_input("input", data_true)
out_true, = pipe_true.run()
else:
out_true = []
if data_false:
pipe_false.feed_input("input", data_false)
out_false, = pipe_false.run()
else:
out_false = []
out, = pipe_sm.run()
out_baseline = []
idx_true = 0
idx_false = 0
for p in predicate:
if p:
out_baseline.append(out_true[idx_true])
idx_true = idx_true + 1
else:
out_baseline.append(out_false[idx_false])
idx_false = idx_false + 1
if dev == "gpu":
out = [out[i].as_cpu() for i in range(bs)]
out_baseline = [out_baseline[i].as_cpu() for i in range(bs)]
check_batch(out, out_baseline, bs)
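# Baseline construction: true-predicate samples run through a rotate-only pipeline,
# false-predicate samples through a flip-only pipeline, and the outputs are
# re-interleaved in predicate order - exactly what split/merge must reproduce.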
def test_conditional_split_merge():
rng = np.random.default_rng()
for dev in ["cpu", "gpu"]:
for pred_gen in [
lambda x: np.array(x < 3), lambda x: np.array(x % 2 == 0),
lambda x: np.array(x % 3 == 0), lambda _: np.array(False),
lambda _: rng.choice([np.array(True), np.array(False)])
]:
yield check_conditional_split_merge, dev, pred_gen
@pipeline_def
def conditional_split_merge_reinterpret_pipe(dtype, layout, shape):
batch_size = Pipeline.current().max_batch_size
input = fn.external_source(
source=[[np.full((10, 10, 3), 42, dtype=np.int32) for _ in range(batch_size)]], cycle=True)
pred = fn.external_source(
source=[[np.array(i % 2 == 0, dtype=bool) for i in range(batch_size)]], cycle=True)
true_branch, false_branch = fn._conditional.split(input, predicate=pred)
false_changed = fn.reinterpret(false_branch, dtype=dtype, layout=layout, shape=shape)
return fn._conditional.merge(true_branch, false_changed, predicate=pred)
def run_conditional_split_merge_reinterpret(dtype, layout, shape):
bs = 10
kwargs = {
"batch_size": bs,
"num_threads": 4,
"device_id": 0,
"prefetch_queue_depth": 1 # so that it's easier to use external source
}
pipe = conditional_split_merge_reinterpret_pipe(dtype, layout, shape, **kwargs)
pipe.build()
pipe.run()
@params((types.UINT32, None, None, "types*"),
(None, "HWC", None, "layouts*"),
(None, None, [10, -1], "sample dimensions*"))
def test_fail_conditional_split_merge(dtype, layout, shape, err_glob):
base = ("Divergent data found in different branches of conditional operation. All paths in "
"conditional operation are merged into one batch which must have consistent type, "
"number of dimensions, layout and other metadata. Found distinct ")
with assert_raises(RuntimeError, glob=base + err_glob):
run_conditional_split_merge_reinterpret(dtype, layout, shape)
|
DALI-main
|
dali/test/python/operator_2/test_split_merge.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# nose_utils goes first to deal with Python 3.10 incompatibility
from nose_utils import assert_raises
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
import numpy as np
import scipy.ndimage
import scipy.ndimage.measurements
import random
from test_utils import check_batch, np_type_to_dali
from nose.tools import nottest
from nose.plugins.attrib import attr
np.random.seed(1234)
random.seed(1234)
def count_outputs(outs):
if isinstance(outs, dali.pipeline.DataNode):
return 1
return len(outs)
data = [
np.int32([[1, 0, 0, 0],
[1, 2, 2, 1],
[1, 1, 2, 0],
[2, 0, 0, 1]]),
np.int32([[0, 3, 3, 0],
[1, 0, 1, 2],
[0, 1, 1, 0],
[0, 2, 0, 1],
[0, 2, 2, 1]])
]
def test_num_output():
"""Test that a proper number of outputs is produced, depending on arguments"""
inp = fn.external_source(data, batch=False)
assert len(fn.segmentation.random_object_bbox(inp)) == 2
for label_out_param in [None, False, True]:
label_out = 1 if label_out_param else 0
for format, num_box_outputs in [("anchor_shape", 2), ("start_end", 2), ("box", 1)]:
assert count_outputs(fn.segmentation.random_object_bbox(
inp, format=format, output_class=label_out_param)) == label_out + num_box_outputs
@nottest
def _test_use_foreground(classes, weights, bg):
inp = fn.external_source(data, batch=False, cycle="quiet")
pipe = dali.pipeline.Pipeline(10, 4, 0, 12345)
pipe_outs = fn.segmentation.random_object_bbox(inp,
output_class=True,
foreground_prob=1,
classes=classes,
class_weights=weights,
background=bg)
pipe.set_outputs(*pipe_outs)
pipe.build()
outs = pipe.run()
for i in range(len(outs[2])):
assert outs[2].at(i) != (bg or 0)
def test_use_foreground():
"""Test that a foreground box is returned when required (prob=1) and possible (fixed data)"""
for classes, weights, bg in [
(None, None, None),
(None, None, 1),
([1, 2, 3], None, None),
(None, [1, 1, 1], None),
(None, [1, 1, 1], 0),
([1, 2, 3], [1, 1, 1], None)]:
yield _test_use_foreground, classes, weights, bg
def objects2boxes(objects, input_shape):
if len(objects) == 0:
return np.int32([[0] * len(input_shape) + list(input_shape)])
return np.int32([
[s.start for s in obj] + [s.stop for s in obj] for obj in objects])
def box_extent(box):
n = len(box) // 2
return box[n:] - box[:n]
def box_volume(box):
return np.prod(box_extent(box))
def box_in_k_largest(boxes, box, k):
"""Returns True if `box` is one of `k` largest boxes in `boxes`. If there are ties that
extend beyond k, they are included."""
if len(boxes) == 0:
return False
boxes = sorted(boxes, reverse=True, key=box_volume)
n = len(boxes)
prev = box_volume(boxes[0])
for i in range(n):
vol = box_volume(boxes[i])
if i >= k:
if vol < prev:
break
prev = vol
if np.array_equal(boxes[i], box):
return True
return False
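# Example (per the docstring's tie rule): with box volumes [9, 4, 4, 1] and k=2,
# both volume-4 boxes qualify - ties at the k-th largest volume are not cut off.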
def all_boxes(array, classes=None, background=None):
if classes is not None:
labels = classes
assert background not in labels
else:
if background is None:
background = 0
labels = list(np.unique(array))
try:
labels.remove(background)
except ValueError:
pass
objects = []
for lbl in labels:
mask = array == lbl
cc, _ = scipy.ndimage.measurements.label(mask)
objs = scipy.ndimage.find_objects(cc)
if len(objs) > 0 and objs[0] is not None:
objects += objs
return objects2boxes(objects, array.shape)
def class_boxes(array, label):
mask = array == label
cc, _ = scipy.ndimage.measurements.label(mask)
objects = scipy.ndimage.find_objects(cc)
return objects2boxes(objects, array.shape)
def axis_indices(shape, axis):
r = np.arange(shape[axis])
r = np.expand_dims(r, list(range(0, axis)) + list(range(axis + 1, len(shape))))
rep = list(shape)
rep[axis] = 1
r = np.tile(r, rep)
return r
def indices(shape):
return np.stack([axis_indices(shape, axis) for axis in range(len(shape))], len(shape))
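# Illustration: indices((2, 3)) has shape (2, 3, 2) with element [i, j] == [i, j],
# i.e. a dense coordinate grid, equivalent to
# np.stack(np.meshgrid(np.arange(2), np.arange(3), indexing='ij'), axis=-1).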
def generate_data(shape, num_classes, blobs_per_class):
"""Generates blobs_per_class gaussian blobs in ND `shape`-shaped array.
Each point is assigned a class at which the maximum blob intensity occurred - or background,
if intensity is below certain threshold. The threshold is adjusted to maintain a preset
precentage of background"""
radii = np.array([shape])
mean = np.random.random([num_classes, blobs_per_class, len(shape)]) * radii
sigma = (np.random.random([num_classes, blobs_per_class, len(shape)]) * 0.8 + 0.2) * radii / 2
mean = np.expand_dims(mean, list(range(2, len(shape) + 2)))
isigma = 1 / np.expand_dims(sigma, list(range(2, len(shape) + 2)))
pos = np.expand_dims(indices(shape), [0, 1])
g = np.exp(-np.sum(((pos - mean) * isigma) ** 2, axis=-1))
g = np.max(g, axis=1) # sum over blobs within class
maxfg = np.max(g, axis=0)
min_bg = 0.5
max_bg = 0.7
bg_lo = 0
bg_hi = 1
volume = np.prod(shape)
while bg_hi - bg_lo > 1e-2:
threshold = (bg_lo + bg_hi) / 2
bg_fraction = np.count_nonzero(maxfg < threshold) / volume
if bg_fraction < min_bg:
bg_lo = threshold
elif bg_fraction > max_bg:
bg_hi = threshold
else:
break
label = np.argmax(g, axis=0) + 1
label[maxfg < threshold] = 0
return label
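# Note: the bisection above tunes the background threshold until the fraction of the
# volume below peak blob intensity lands in [min_bg, max_bg], i.e. 50-70% of points
# end up labeled 0.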
def generate_samples(num_samples, ndim, dtype):
samples = []
for i in range(num_samples):
shape = list(np.random.randint(5, 13, [ndim]))
num_classes = np.random.randint(1, 10)
blobs_per_class = np.random.randint(1, 10)
samples.append(generate_data(shape, num_classes, blobs_per_class).astype(dtype))
return samples
def batch_generator(batch_size, ndim, dtype):
"""Returns a generator that generates completely new data each time it's called"""
def gen():
# batch_size = np.random.randint(1, max_batch_size+1)
return generate_samples(batch_size, ndim, dtype)
return gen
def sampled_dataset(dataset_size, batch_size, ndim, dtype):
"""Returns a generator that returns random samples from a pre-generated dataset"""
data = generate_samples(dataset_size, ndim, dtype)
def gen():
# batch_size = np.random.randint(1, max_batch_size+1)
return [random.choice(data) for _ in range(batch_size)]
return gen
def random_background():
return fn.random.uniform(range=(-5, 10), dtype=dali.types.INT32, seed=12321)
def random_classes(background):
def get_classes():
tmp = list(np.flatnonzero(np.random.random([10]) > 0.5))
try:
tmp.remove(background)
except ValueError:
            pass  # background label was not in the list - nothing to remove
return np.int32(tmp)
return fn.external_source(get_classes, batch=False)
def random_weights():
def get_weights():
tmp = np.random.random(np.random.randint(1, 10)).astype(np.float32)
return tmp
return fn.external_source(get_weights, batch=False)
def random_threshold(ndim):
return fn.random.uniform(range=(1, 5), shape=[ndim], dtype=dali.types.INT32, seed=13231)
def contains_box(boxes, box):
return (boxes == box).all(axis=1).any()
def convert_boxes(outs, format):
if format == "box":
return outs[0]
elif format == "start_end":
return [np.concatenate([start, end]) for start, end in zip(outs[0], outs[1])]
elif format == "anchor_shape":
return [np.concatenate([anchor, anchor + shape]) for anchor, shape in zip(outs[0], outs[1])]
else:
raise ValueError("Test error - unexpected format: {}".format(format))
@nottest
def _test_random_object_bbox_with_class(max_batch_size, ndim, dtype, format=None, fg_prob=None,
classes=None, weights=None, background=None,
threshold=None, k_largest=None, cache=None):
pipe = dali.Pipeline(max_batch_size, 4, device_id=None, seed=4321)
background_out = 0 if background is None else background
classes_out = np.int32([]) if classes is None else classes
weights_out = np.int32([]) if weights is None else weights
threshold_out = np.int32([]) if threshold is None else threshold
if cache:
source = sampled_dataset(2 * max_batch_size, max_batch_size, ndim, dtype)
else:
source = batch_generator(max_batch_size, ndim, dtype)
with pipe:
inp = fn.external_source(source)
if (isinstance(background, dali.pipeline.DataNode)
or (background is not None and background >= 0)):
inp = fn.cast(inp + (background_out + 1), dtype=np_type_to_dali(dtype))
# preconfigure
op = ops.segmentation.RandomObjectBBox(format=format,
foreground_prob=fg_prob,
classes=classes,
class_weights=weights,
background=background,
threshold=threshold,
k_largest=k_largest,
seed=1234)
outs1 = op(inp, cache_objects=cache)
outs2 = op(inp, output_class=True)
if not isinstance(outs1, list):
outs1 = [outs1]
# the second instance should have always at least 2 outputs
assert isinstance(outs2, (list, tuple))
outputs = [inp, classes_out, weights_out, background_out, threshold_out, *outs1, *outs2]
pipe.set_outputs(*outputs)
pipe.build()
format = format or "anchor_shape"
for _ in range(50):
inp, classes_out, weights_out, background_out, threshold_out, *outs = pipe.run()
nout = (len(outs) - 1) // 2
outs1 = outs[:nout]
outs2 = outs[nout:]
for i in range(len(outs1)):
check_batch(outs1[i], outs2[i])
# Iterate over indices instead of elements, because normal iteration
# causes an exception to be thrown in native code, making debugging near impossible.
outs = tuple([np.array(out[i]) for i in range(len(out))] for out in outs1)
box_class_labels = [np.int32(outs2[-1][i]) for i in range(len(outs2[-1]))]
boxes = convert_boxes(outs, format)
for i in range(len(inp)):
in_tensor = inp.at(i)
class_labels = classes_out.at(i)
if background is not None or classes is None:
background_label = background_out.at(i)
else:
background_label = 0 if 0 not in class_labels else np.min(class_labels) - 1
label = box_class_labels[i]
if classes is not None:
assert label == background_label or label in list(class_labels)
is_foreground = label != background_label
cls_boxes = class_boxes(in_tensor, label if is_foreground else None)
if is_foreground:
ref_boxes = cls_boxes
if threshold is not None:
extent = box_extent(boxes[i])
thr = threshold_out.at(i)
assert np.all(extent >= thr)
ref_boxes = list(filter(lambda box: np.all(box_extent(box) >= thr), cls_boxes))
if k_largest is not None:
assert box_in_k_largest(ref_boxes, boxes[i], k_largest)
assert contains_box(cls_boxes, boxes[i])
def test_random_object_bbox_with_class():
np.random.seed(12345)
types = [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32]
formats = [None, "anchor_shape", "start_end", "box"]
fmt = 0
for bg in [None, 0, -1, 5, random_background()]:
if bg is None or isinstance(bg, int):
class_opt = [None, [0], [1], [2, 4, 5, 7]]
for x in class_opt:
if isinstance(x, list) and bg in x:
x.remove(bg)
if [] in class_opt:
class_opt.remove([])
            # putting this in the initial list interfered with the remove() calls above
class_opt.append(random_classes(0 if bg is None else bg))
else:
class_opt = [None]
for classes in class_opt:
if classes is None:
weights_opt = [None, [1], [0.5, 1, 0.1, 0.2], random_weights()]
elif isinstance(classes, list):
weights_opt = [None, list(range(1, 1 + len(classes)))]
else:
weights_opt = [None]
for weights in weights_opt:
ndim = np.random.randint(1, 5)
threshold_opt = [None, 3, list(range(1, 1 + ndim)), random_threshold(ndim)]
threshold = random.choice(threshold_opt)
k_largest_opt = [None, 1, 2, 5]
k_largest = random.choice(k_largest_opt)
fg_prob_opt = [None, 0.1, 0.7, fn.random.uniform(range=(0, 1), seed=1515)]
fg_prob = random.choice(fg_prob_opt)
format = formats[fmt]
fmt = (fmt + 1) % len(formats)
dtype = random.choice(types)
cache = np.random.randint(2) == 1
yield (_test_random_object_bbox_with_class,
4, ndim, dtype, format,
fg_prob, classes, weights, bg,
threshold, k_largest,
cache)
@nottest
def _test_random_object_bbox_ignore_class(max_batch_size,
ndim,
dtype,
format=None,
background=None,
threshold=None,
k_largest=None):
pipe = dali.Pipeline(max_batch_size, 4, device_id=None, seed=4321)
background_out = 0 if background is None else background
threshold_out = np.int32([]) if threshold is None else threshold
with pipe:
inp = fn.external_source(batch_generator(max_batch_size, ndim, dtype))
outs = fn.segmentation.random_object_bbox(inp,
format=format,
ignore_class=True,
background=background,
seed=1234,
threshold=threshold,
k_largest=k_largest)
if not isinstance(outs, list):
outs = [outs]
pipe.set_outputs(inp, background_out, threshold_out, *outs)
pipe.build()
format = format or "anchor_shape"
for _ in range(50):
inp, background_out, threshold_out, *outs = pipe.run()
# Iterate over indices instead of elements, because normal iteration
# causes an exception to be thrown in native code, making debugging near impossible.
outs = tuple([np.array(out[i]) for i in range(len(out))] for out in outs)
boxes = convert_boxes(outs, format)
for i in range(len(inp)):
in_tensor = inp.at(i)
background_label = background_out.at(i)
ref_boxes = all_boxes(in_tensor, None, background_label)
if threshold is not None:
thr = threshold_out.at(i)
ref_boxes = list(filter(lambda box: np.all(box_extent(box) >= thr), ref_boxes))
if len(ref_boxes) == 0:
ref_boxes = np.int32([[0] * len(in_tensor.shape) + list(in_tensor.shape)])
if k_largest is not None:
assert box_in_k_largest(ref_boxes, boxes[i], k_largest)
else:
assert contains_box(ref_boxes, boxes[i])
def test_random_object_bbox_ignore_class():
np.random.seed(43210)
types = [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32]
for bg in [None, 0, -1, 5, random_background()]:
ndim = np.random.randint(1, 5)
dtype = random.choice(types)
for format in [None, "anchor_shape", "start_end", "box"]:
threshold_opt = [None, 3, list(range(1, 1 + ndim)), random_threshold(ndim)]
threshold = random.choice(threshold_opt)
k_largest_opt = [None, 1, 2, 5]
k_largest = random.choice(k_largest_opt)
yield (_test_random_object_bbox_ignore_class,
5, ndim, dtype, format, bg, threshold, k_largest)
@nottest
def _test_random_object_bbox_auto_bg(fg_labels, expected_bg):
"""Checks that a correct backgorund labels is chosen:
0, if 0 is not present in the list of foreground classes
smallest label - 1 if 0 is present
if the smallest label -1 overflows, decrement the label until no collision
"""
pipe = dali.Pipeline(batch_size=1, num_threads=1, device_id=0, seed=1234)
data = np.uint32([0, 1, 2, 3])
box, label = fn.segmentation.random_object_bbox(
data, foreground_prob=1e-9, format="box", output_class=1, classes=fg_labels)
pipe.set_outputs(box, label)
pipe.build()
_, labels = pipe.run()
assert int(labels.at(0)) == expected_bg
def test_random_object_bbox_auto_bg():
for fg, expected_bg in [
([1, 2, 3], 0),
([0, 1, 2], -1),
([-1, 1], 0),
([0, -5], -6),
([-0x80000000, 0x7fffffff], 0),
([-0x80000000, 0x7fffffff, 0, 0x7ffffffe], 0x7ffffffd)
]:
yield _test_random_object_bbox_auto_bg, fg, expected_bg
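def _expected_auto_background(fg_labels):
    # Editor's reference sketch (not DALI source): the background-selection rule
    # described in the docstring above, with int32 wrap-around on overflow
    # emulated via np.int32 arithmetic.
    if 0 not in fg_labels:
        return 0
    with np.errstate(over='ignore'):
        bg = np.int32(min(fg_labels)) - np.int32(1)  # may wrap around
        while int(bg) in fg_labels:
            bg = bg - np.int32(1)
    return int(bg)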
@nottest
def _test_err_args(**kwargs):
pipe = dali.Pipeline(batch_size=1, num_threads=1, device_id=0, seed=1234)
inp = fn.external_source(data, batch=False)
outs = fn.segmentation.random_object_bbox(inp, **kwargs)
pipe.set_outputs(*outs)
pipe.build()
pipe.run()
def test_err_classes_bg():
with assert_raises(RuntimeError, glob="Class label 0 coincides with background label"):
_test_err_args(classes=[0, 1, 2, 3], background=0)
def test_err_classes_weights_length_clash():
error_msg = r"If both ``classes`` and ``class_weights`` are provided, their shapes must " \
r"match. Got:\s+classes.shape = \{4\}\s+weights.shape = \{3\}"
with assert_raises(RuntimeError, regex=error_msg):
_test_err_args(classes=[0, 1, 2, 3], class_weights=np.float32([1, 2, 3]))
with assert_raises(RuntimeError, regex=error_msg):
_test_err_args(classes=np.int32([0, 1, 2, 3]), class_weights=[3, 2, 1])
def test_err_classes_ignored():
with assert_raises(
RuntimeError,
glob="Class-related arguments * cannot be used when ``ignore_class`` is True"):
_test_err_args(classes=[0, 1, 2, 3], ignore_class=True)
def test_err_k_largest_nonpositive():
with assert_raises(RuntimeError, glob="``k_largest`` must be at least 1; got -1"):
_test_err_args(k_largest=-1)
with assert_raises(RuntimeError, glob="``k_largest`` must be at least 1; got 0"):
_test_err_args(k_largest=0)
def test_err_threshold_dim_clash():
with assert_raises(
RuntimeError,
glob="Argument \"threshold\" expected shape 2 but got 5 values, "
"which can't be interpreted as the expected shape."):
_test_err_args(threshold=[1, 2, 3, 4, 5])
@attr('slow')
def slow_test_large_data():
yield _test_random_object_bbox_with_class, \
4, 5, np.int32, None, 1., [1, 2, 3], None, None, None, 10
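def _example_box_formats():
    # Editor's sketch (an assumption based on the formats exercised above, not
    # DALI source): the three output formats describe the same box.
    #   "anchor_shape": (anchor, shape)       - two outputs
    #   "start_end":    (start, end)          - two outputs, end = anchor + shape
    #   "box":          start and end fused   - one output
    anchor, shape = np.int32([2, 3]), np.int32([4, 5])
    start, end = anchor, anchor + shape
    box = np.concatenate([start, end])
    assert np.array_equal(box, np.int32([2, 3, 6, 8]))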
|
DALI-main
|
dali/test/python/operator_2/test_random_object_bbox.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali as dali
from nvidia.dali.pipeline import Pipeline
from nvidia.dali.backend_impl import TensorListGPU
import numpy as np
import scipy.stats as st
def check_uniform_default(device='cpu', batch_size=32, shape=[1e5], val_range=None, niter=3):
pipe = Pipeline(batch_size=batch_size, device_id=0, num_threads=3, seed=123456)
with pipe:
pipe.set_outputs(dali.fn.random.uniform(device=device, range=val_range, shape=shape))
pipe.build()
for it in range(niter):
outputs = pipe.run()
val_range = (-1.0, 1.0) if val_range is None else val_range
data_out = outputs[0].as_cpu() \
if isinstance(outputs[0], TensorListGPU) else outputs[0]
pvs = []
for i in range(batch_size):
data = np.array(data_out[i])
# Check that the data is within the default range
assert (data >= val_range[0]).all() and \
(data <= val_range[1]).all(), \
"Value returned from the op is outside of requested range"
h, b = np.histogram(data, bins=10)
mean_h = np.mean(h)
for hval in h:
np.testing.assert_allclose(mean_h, hval, rtol=0.05) # +/- 5%
# normalize to 0-1 range
data_kstest = (data - val_range[0]) / (val_range[1] - val_range[0])
_, pv = st.kstest(rvs=data_kstest, cdf='uniform')
pvs = pvs + [pv]
assert np.mean(pvs) > 0.05, f"data is not a uniform distribution. pv = {np.mean(pvs)}"
def test_uniform_continuous():
batch_size = 4
shape = [100000]
niter = 3
for device in ['cpu', 'gpu']:
for val_range in [None, (200.0, 400.0)]:
yield check_uniform_default, device, batch_size, shape, val_range, niter
def check_uniform_continuous_next_after(device='cpu', batch_size=32, shape=[1e5], niter=3):
val_range = [np.float32(10.0), np.nextafter(np.float32(10.0), np.float32(11.0))]
pipe = Pipeline(batch_size=batch_size, device_id=0, num_threads=3, seed=123456)
with pipe:
pipe.set_outputs(dali.fn.random.uniform(device=device, range=val_range, shape=shape))
pipe.build()
for it in range(niter):
outputs = pipe.run()
data_out = outputs[0].as_cpu() \
if isinstance(outputs[0], TensorListGPU) else outputs[0]
for i in range(batch_size):
data = np.array(data_out[i])
assert (val_range[0] == data).all(), \
f"{data} is outside of requested range"
def test_uniform_continuous_next_after():
batch_size = 4
shape = [100000]
niter = 3
for device in ['cpu', 'gpu']:
yield check_uniform_continuous_next_after, device, batch_size, shape, niter
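def _example_nextafter_range():
    # Editor's note (sketch): the range used above spans exactly one representable
    # float32 value, so every generated sample must equal the lower bound.
    lo = np.float32(10.0)
    hi = np.nextafter(lo, np.float32(11.0))
    assert hi > lo and np.nextafter(lo, hi) == hi  # lo and hi are adjacent floats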
def check_uniform_discrete(device='cpu', batch_size=32, shape=[1e5], values=None, niter=10):
pipe = Pipeline(batch_size=batch_size, device_id=0, num_threads=3, seed=123456)
with pipe:
pipe.set_outputs(dali.fn.random.uniform(device=device, values=values, shape=shape))
pipe.build()
for it in range(niter):
outputs = pipe.run()
data_out = outputs[0].as_cpu() \
if isinstance(outputs[0], TensorListGPU) else outputs[0]
values_set = set(values)
maxval = np.max(values)
bins = np.concatenate([values, np.array([np.nextafter(maxval, maxval + 1)])])
bins.sort()
pvs = []
for i in range(batch_size):
data = np.array(data_out[i])
for x in data:
assert x in values_set
h, _ = np.histogram(data, bins=bins)
_, pv = st.chisquare(h)
pvs = pvs + [pv]
assert np.mean(pvs) > 0.05, f"data is not a uniform distribution. pv = {np.mean(pvs)}"
def test_uniform_discrete():
batch_size = 4
shape = [10000]
niter = 3
for device in ['cpu', 'gpu']:
for values in [(0, 1, 2, 3, 4, 5), (200, 400, 5000, 1)]:
yield check_uniform_discrete, device, batch_size, shape, values, niter
|
DALI-main
|
dali/test/python/operator_2/test_uniform.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.fn as fn
from nvidia.dali import pipeline_def
from nose_utils import raises
def get_data(shapes):
return [np.empty(shape, dtype=np.uint8) for shape in shapes]
@pipeline_def
def squeeze_pipe(shapes, axes=None, axis_names=None, layout=None):
data = fn.external_source(lambda: get_data(shapes), layout=layout, batch=True, device="cpu")
return fn.squeeze(data, axes=axes, axis_names=axis_names)
def _testimpl_squeeze(axes, axis_names, layout, shapes, expected_out_shapes, expected_layout):
batch_size = len(shapes)
pipe = squeeze_pipe(batch_size=batch_size, num_threads=1, device_id=0, shapes=shapes, axes=axes,
axis_names=axis_names, layout=layout)
pipe.build()
for _ in range(3):
outs = pipe.run()
assert outs[0].layout() == expected_layout
for i in range(batch_size):
out_arr = np.array(outs[0][i])
assert out_arr.shape == expected_out_shapes[i]
def test_squeeze():
# axes, axis_names, layout, shapes, expected_out_shapes, expected_layout
args = [
([1], None, "XYZ", [(300, 1, 200), (10, 1, 10)], [(300, 200), (10, 10)], "XZ"),
([1, 2], None, "XYZ", [(300, 1, 1), (10, 1, 1)], [(300,), (10,)], "X"),
([0, 2], None, "XYZ", [(1, 300, 1), (1, 10, 1)], [(300,), (10,)], "Y"),
([0, 2], None, "ABCD", [(1, 1, 1, 1), (1, 1, 1, 1)], [(1, 1,), (1, 1)], "BD"),
(None, "Z", "XYZ", [(300, 1, 1), (10, 1, 1)], [(300, 1), (10, 1)], "XY"),
(None, "ZY", "XYZ", [(300, 1, 1), (10, 1, 1)], [(300,), (10,)], "X"),
([0], None, "X", [(1)], [()], ""),
([1], None, "XYZ", [(100, 0, 0)], [(100, 0)], "XZ"),
(None, "Z", "XYZ", [(100, 0, 0)], [(100, 0)], "XY"),
(None, "X", "XYZ", [(100, 0, 0)], [(0, 0)], "YZ"),
]
for axes, axis_names, layout, shapes, expected_out_shapes, expected_layout in args:
yield _testimpl_squeeze, axes, axis_names, layout, \
shapes, expected_out_shapes, expected_layout
def _test_squeeze_throw_error(axes, axis_names, layout, shapes):
pipe = squeeze_pipe(batch_size=len(shapes), num_threads=1, device_id=0, shapes=shapes,
axes=axes, axis_names=axis_names, layout=layout)
pipe.build()
pipe.run()
def test_squeeze_throw_error():
args_list = [
([1], None, None, [(300, 1, 200), (10, 10, 10)]),
(None, "C", "XYZ", [(2, 3, 4), (4, 2, 3)]),
(None, "Z", "XYZ", [(1, 1, 10)]),
([2], "Z", "XYZ", [[1, 1, 10]]),
([2, 1], None, "XYZ", [(100, 0, 0)]),
([1, 1], None, "XYZ", [(300, 1, 200), (10, 1, 10)]),
]
expected_errors = [
"Requested a shape with 100 elements but the original shape has 1000 elements.",
"Axis 'C' is not present in the input layout",
"Requested a shape with 1 elements but the original shape has 10 elements.",
"Provided both ``axes`` and ``axis_names`` arguments",
"Requested a shape with 100 elements but the original shape has 0 elements.",
"Specified at least twice same dimension to remove."
]
assert len(expected_errors) == len(args_list)
for (axes, axis_names, layout, shapes), error_msg in zip(args_list, expected_errors):
yield raises(RuntimeError, error_msg)(_test_squeeze_throw_error), \
axes, axis_names, layout, shapes
|
DALI-main
|
dali/test/python/operator_2/test_squeeze.py
|
# Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
|
DALI-main
|
dali/python/nvidia/__init__.py
|
import importlib
import sys
import types
def get_submodule(root, path):
"""Gets or creates submodule(s) of `root`.
If the module path contains multiple parts, multiple modules are traversed or created
Parameters
----------
`root`
module object or name of the root module
`path`
period-separated path of the submodule or a list/tuple of submodule names"""
if isinstance(root, str):
root = sys.modules[root]
if not path:
return root
if isinstance(path, str):
if path == '':
return root
path = path.split('.')
module_name = root.__name__
for part in path:
m = getattr(root, part, None)
module_name += '.' + part
if m is None:
try:
# Try importing existing module (if not loaded yet) to not overwrite it.
m = importlib.import_module(module_name)
except ModuleNotFoundError:
m = sys.modules[module_name] = types.ModuleType(module_name)
setattr(root, part, m)
elif not isinstance(m, types.ModuleType):
raise RuntimeError(
f"The module {root} already contains an attribute \"{part}\", "
f"which is not a module, but {m}")
root = m
return root
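def _example_get_submodule():
    # Editor's usage sketch (hypothetical module names): create two nested
    # submodules under a freshly registered root and verify the registration.
    root = sys.modules["_gs_demo_root"] = types.ModuleType("_gs_demo_root")
    sub = get_submodule(root, "a.b")
    assert sub.__name__ == "_gs_demo_root.a.b"
    assert sys.modules["_gs_demo_root.a.b"] is sub
    return sub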
|
DALI-main
|
dali/python/nvidia/dali/internal.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the implementation of DALI if statement.
It initializes AutoGraph with the DaliOperatorOverload that provides the overload for the if_stmt
and adjusts the filtered modules so that DALI code is not converted.
The if_stmt provides access to both branches as callables and the set_state/get_state functions
that allow capturing and adjusting all symbols modified within those branches. This makes it
possible to checkpoint the state and visit the code of both branches.
if_stmt highlights which state variables are considered the outputs of the if/else pair - we can
use the state captured after visiting if and else branches and produce fn._conditional.merge
nodes for all of them.
When visiting the if/else scopes, we track the path that we took and the predicates that
were used via the _ConditionStack. As it is not easy to detect which state variables would be
consumed as inputs to DALI operators, we inject additional code to the operator function.
Every time a DataNode is consumed, we look up in which scope it was produced and travel the
path from that point to the current scope in the _ConditionStack, applying necessary splits.
All the return values are registered to the current scope for further lookups.
"""
from nvidia.dali import _autograph
from nvidia.dali.data_node import DataNode as _DataNode
from nvidia.dali import fn
from nvidia.dali._autograph.utils import ag_logging as logging
from nvidia.dali._autograph.operators import variables
from contextlib import contextmanager
from enum import Enum
import tree
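# Editor's usage sketch (not part of this module): what the machinery below
# enables on the user side. With `enable_conditionals=True`, AutoGraph converts
# a Python `if` on a DataNode into fn._conditional split/merge nodes.
def _example_conditional_pipeline():
    from nvidia.dali import pipeline_def, types

    @pipeline_def(enable_conditionals=True, batch_size=8, num_threads=2, device_id=0)
    def pipe():
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL)
        data = fn.random.uniform(range=(0.0, 1.0))
        if pred:  # becomes fn._conditional.split/merge under the hood
            out = data + 1.0
        else:
            out = data - 1.0
        return out

    return pipe()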
def _data_node_repr(data_node):
return f"DataNode(name={data_node.name}, device={data_node.device}, source={data_node.source})"
class _Branch(Enum):
TrueBranch = 0
FalseBranch = 1
Undefined = 2
class _StackEntry:
"""Information about 1 nesting level of if/else statement.
Keeps the current branch (if we entered if/else branch) and the data nodes that were
produced in their scopes. Keeps the mapping of DataNodes produced in higher scopes that
were already split for use in this scope.
"""
def __init__(self, predicate):
self.predicate = predicate
self.branch = _Branch.Undefined
self.splits = {}
self.produced_true = set()
self.produced_false = set()
# produced_special handles values that are visible at this nesting level but were not
# produced in either branch; it is used by the merge code.
self.produced_special = set()
@property
def produced(self):
"""Access the set of hashes of DataNodes produced in the scope of currently selected branch.
"""
if self.branch == _Branch.TrueBranch:
return self.produced_true
elif self.branch == _Branch.FalseBranch:
return self.produced_false
else:
return self.produced_special | self.produced_true | self.produced_false
@produced.setter
def produced(self, value):
"""Access the set of hashes of DataNodes produced in the scope of currently selected branch.
"""
if self.branch == _Branch.TrueBranch:
self.produced_true = value
elif self.branch == _Branch.FalseBranch:
self.produced_false = value
else:
self.produced_special = value
def add_produced(self, data_node):
"""Add the DataNode or DataNodes to produced in the scope of currently selected branch."""
if isinstance(data_node, _DataNode):
self.produced |= {_data_node_repr(data_node)}
elif isinstance(data_node, list):
if not data_node:
return
if isinstance(data_node[0], _DataNode):
self.produced |= set(_data_node_repr(dn) for dn in data_node)
elif isinstance(data_node[0], list):
flat_list = [item for sublist in data_node for item in sublist]
self.add_produced(flat_list)
else:
raise ValueError(f"Unexpected operator result to register: {data_node}. Expected up to"
" two-level nesting of DataNode.")
def add_split(self, source_data_node, producer_node, true_node, false_node):
"""Register the outputs of split node that were produced from the source_data_node
(or its descendant on this scope, the shortcut node).
Parameters
----------
source_data_node : DataNode
Original source node that was looked up, record for faster consecutive lookups
producer_node : DataNode
The closest node on the path from source_data_node to this split
true_node : DataNode
True branch split
false_node : DataNode
False branch split
"""
self.splits[_data_node_repr(source_data_node)] = (true_node, false_node)
# Record the direct preceding node as the producer:
self.splits[_data_node_repr(producer_node)] = (true_node, false_node)
self.produced_true |= {_data_node_repr(true_node)}
self.produced_false |= {_data_node_repr(false_node)}
def __str__(self):
return (f"StackEntry: pred={self.predicate}, branch={self.branch}, splits={self.splits},"
f" produced={self.produced}")
def has(self, data_node):
"""Check if this DataNode was either produced in this scope or already split for this scope.
"""
if _data_node_repr(data_node) in self.produced:
return True
elif _data_node_repr(data_node) in self.splits:
return True
else:
return False
def get(self, data_node):
"""Return the `data_node` if it was produced in this scope, or the appropriate split node
that was created for accessing the `data_node` in this scope.
"""
assert self.has(data_node)
if _data_node_repr(data_node) in self.produced:
return data_node
else:
assert self.branch in {_Branch.TrueBranch, _Branch.FalseBranch}
return self.splits[_data_node_repr(data_node)][self.branch.value]
class _ConditionStack:
"""Tracks the current if/else scope with the path that we took. Captures the used and produced
data nodes, applying the necessary splits based on the scope level where they were produced
and where they are used.
"""
def __init__(self):
self._stack = [_StackEntry(None)]
self._is_registration_allowed = True
def push_predicate(self, predicate):
"""Add next level of if/else scope that is predicated with the `predicate`.
The user might have provided a predicate from a scope of higher level, which means
that `predicate` might be subject to additional slicing. Apply that slicing and return
the actual predicate that will be used for slicing when entering this scope.
The situation will happen for example in a case like this, where both predicates are
produced in global scope:
pred_0 = ...
pred_1 = ...
if pred_0: # push_pred(pred_0) -> returns pred_0
if pred_1: # push_pred(pred_1) ->
# -> returns fn._conditional.slice(pred_1, predicate=pred_0)
Parameters
----------
predicate : DataNode
Predicate guarding this scope.
Returns
-------
DataNode
Actual predicate after applying necessary slices to use it in this scope.
"""
new_pred = self.preprocess_input(predicate)
new_entry = _StackEntry(new_pred)
self._stack.append(new_entry)
return new_pred
def top(self):
"""Get the top scope in the stack"""
return self._stack[-1]
def pop(self):
"""Remove the top scope from the stack"""
result = self._stack.pop()
return result
def stack_depth(self):
"""Get the depth of the stack. Note, that by default there is at least one element
- the global scope."""
return len(self._stack)
def _find_closest(self, data_node):
"""Find the closest scope level in the stack where we can access this node as produced
(or the split of this node closest to us).
"""
for level in range(self.stack_depth() - 1, -1, -1):
if self._stack[level].has(data_node):
return level
raise ValueError(f"{data_node} was not produced within this trace.")
def _realize_split(self, data_node, stack_level):
"""The data_node was produced (or last accessed as via split) in scope earlier than the
current one, traverse the scopes between that level and current one, and insert split nodes.
Parameters
----------
data_node : DataNode
The data node that we want to use in the current scope.
stack_level : int
Stack level where the data_node was last "seen".
Returns
-------
DataNode
New node that can be used in current branch and scope.
"""
assert 0 <= stack_level < self.stack_depth() - 1
produced_data_node = self._stack[stack_level].get(data_node)
bottom = self._stack[:stack_level + 1]
top = self._stack[stack_level + 1:]
self._stack = bottom
while top:
current_entry = top.pop(0)
predicate = current_entry.predicate
# Do not automatically register the outputs in the current scope, we track them below
# in their respective branches.
logging.log(9, (f"{self._indent()}[IF] Inserting split"
f" at {self.stack_depth() -1}:"
f" split({produced_data_node}, predicate={predicate}."))
self._is_registration_allowed = False
true_node, false_node = fn._conditional.split(produced_data_node, predicate=predicate,
_if_stmt=True)
self._is_registration_allowed = True
# Record the result of splitting the `data_node` that we are trying to look up
# (short-cut for consecutive lookups)
current_entry.add_split(data_node, produced_data_node, true_node, false_node)
if current_entry.branch == _Branch.TrueBranch:
produced_data_node = true_node
else:
produced_data_node = false_node
self._stack.append(current_entry)
return produced_data_node
def preprocess_input(self, data_node):
"""Process the DataNode that is an input to an operator call. Detect if the DataNode was
produced on the same nesting level. If not, split accordingly to the stack of the previous
conditions. Caches the previously processed DataNodes to not do repeated splitting.
"""
stack_level = self._find_closest(data_node)
logging.log(8, (f"{self._indent()}[IF/Input] {data_node} accessed at level"
f" {self.stack_depth() - 1} found at {stack_level}."))
# We already have it cached or produced in this scope.
if stack_level == self.stack_depth() - 1:
return self.top().get(data_node)
# otherwise, we need to fill in the splits.
return self._realize_split(data_node, stack_level)
def register_data_nodes(self, data_nodes, global_scope=False):
"""Register the data nodes as produced in current scope, otherwise if `global_scope` is True
put them in the outermost scope.
"""
if not self._is_registration_allowed:
return
logging.log(8, (f"{self._indent()}[IF/Register] {data_nodes} at {self.stack_depth() -1}"))
scope = self._stack[0] if global_scope else self.top()
tree.map_structure(lambda node: scope.add_produced(node), data_nodes)
def track_true_branch(self):
"""Mark `if` (true) branch as current scope."""
self.top().branch = _Branch.TrueBranch
def track_false_branch(self):
"""Mark `else` (false) branch as current scope."""
self.top().branch = _Branch.FalseBranch
def no_branch(self):
"""Mark no branch being tracked, the scope "level" stays related to the same if/else
statement."""
self.top().branch = _Branch.Undefined
def track_merge(self, split_predicate):
"""Enter the merge section of the if/else statement. It adds the corresponding
split_predicate to the nodes visible as produced in the current scope, so all data nodes
are directly accessible in this scope when looked up by the merge operator.
We don't care about removing it as it's the last thing happening in that statement.
"""
self.no_branch()
self.top().add_produced(split_predicate)
def scope_batch_size_tracker(self):
"""Return the DataNode that can be used as a reference batch size in this scope.
None is returned if we are in the top level scope.
"""
if self.stack_depth() == 1:
return None
if self.top().branch in {_Branch.TrueBranch, _Branch.FalseBranch}:
# In the worst case we will introduce a split on the predicate itself, but we know
# we can do it consistently, and it will happen only for the first operator call;
# for the following ones in this scope it will be cached.
return self.preprocess_input(self.top().predicate)
else:
# If we are in the merge stage, just use the size of the predicate
return self.top().predicate
def _indent(self):
"""Helper for indenting the log messages to resemble visited scopes"""
return ' ' * (self.stack_depth() - 1)
@contextmanager
def _cond_manager(predicate):
actual_predicate = this_condition_stack().push_predicate(predicate)
logging.log(7, (f"{this_condition_stack()._indent()}[IF]: {predicate}"
f" at {this_condition_stack().stack_depth() - 1}"))
# Return it so we can use it in merge
yield actual_predicate
this_condition_stack().pop()
@contextmanager
def _cond_true():
this_condition_stack().track_true_branch()
logging.log(7, (f"{this_condition_stack()._indent()}[IF]: `if` branch"
f" at {this_condition_stack().stack_depth() - 1}"))
yield
this_condition_stack().no_branch()
@contextmanager
def _cond_false():
this_condition_stack().track_false_branch()
logging.log(7, (f"{this_condition_stack()._indent()}[IF]: `else` branch"
f" at {this_condition_stack().stack_depth() - 1}"))
yield
this_condition_stack().no_branch()
@contextmanager
def _cond_merge(split_predicate):
this_condition_stack().track_merge(split_predicate)
yield
this_condition_stack().no_branch()
def conditionals_enabled():
"""Check (within a Pipeline context) if the conditionals are enabled.
"""
from nvidia.dali._debug_mode import _PipelineDebug
current_pipeline = _PipelineDebug.current()
enabled = getattr(current_pipeline, '_conditionals_enabled', False)
return enabled
def this_condition_stack():
"""Return the condition stack of current Pipeline"""
from nvidia.dali._debug_mode import _PipelineDebug
current_pipeline = _PipelineDebug.current()
if current_pipeline._condition_stack is None:
raise ValueError("Cannot access current condition stack when conditionals"
" were not enabled for a given pipeline.")
return current_pipeline._condition_stack
def register_data_nodes(data_node, inputs=[], args={}):
"""Register the outputs of the operator as produced in the scope of the current conditional
branch. Pass the list of inputs and dictionary of arguments to automatically detect if any
DataNode was passed to that operator, indicating that it has proper inputs or argument inputs
and can infer the batch size. Otherwise the outputs are registered in global scope, assuming
that they use current batch size.
Parameters
----------
data_node : DataNode or a list/tuple of DataNode
The output of the operator to be registered.
inputs : List of DataNode
Optional list of inputs of the operator whose outputs we are registering.
args : Dict of DataNode
Optional dictionary containing the arguments of the operator whose outputs we are
registering.
"""
any_positional_input = any(isinstance(input, _DataNode) for input in inputs)
any_arg_input = any(isinstance(arg, _DataNode) for arg_name, arg in args.items())
any_input = any_positional_input or any_arg_input
# TODO(klecki): In theory we have two approaches for inputless operators. Here we insert their
# outputs to top level and let the automatic splitting handle the situation. Otherwise we could
# pass the scope information and batch_size within that scope to all operators that are invoked
# within that scope.
this_condition_stack().register_data_nodes(data_node, global_scope=not any_input)
def apply_conditional_split(input):
"""Preprocess the DataNode to obtain correctly split batch for the current if scope."""
return this_condition_stack().preprocess_input(input)
def apply_conditional_split_to_branch_outputs(branch_outputs, promote_constants=True):
"""Apply splitting to the branch outputs. This may be necessary for DataNodes that are
branch outputs but were not touched in that branch (for example that branch is no-op).
Parameters
----------
branch_outputs : tuple of DataNode
Outputs of the branch
promote_constants : bool, optional
Whether to promote constants to cpu-based Constant op, by default True
Returns
-------
tuple of DataNode
"""
from nvidia.dali.types import Constant
def apply_split(atom):
if isinstance(atom, _DataNode):
return apply_conditional_split(atom)
elif promote_constants:
# We assume that any return from the branch must be merged, so constants are promoted
# to batches using constant op, and thus can be used in merge.
constant_node = Constant(atom, device="cpu")
register_data_nodes(constant_node)
return apply_conditional_split(constant_node)
return atom
return tree.map_structure(apply_split, branch_outputs)
def apply_conditional_split_to_args(inputs, kwargs):
"""Preprocess the inputs and kwargs of the operator to obtain correctly split inputs for the
current if scope."""
inputs = apply_conditional_split_to_branch_outputs(inputs, False)
for key, arg in kwargs.items():
if isinstance(arg, _DataNode):
kwargs[key] = apply_conditional_split(arg)
return inputs, kwargs
def _verify_branch_outputs(outputs, symbol_names, branch_name):
"""Verifies variables output by a conditional branch for consistency."""
common_explanation = (
"Encountered inconsistent outputs out of the `if/else` control flow statement."
" Variables need to be initialized in every code path (both `if` branches).")
for name, output in zip(symbol_names, outputs):
if isinstance(output, variables.Undefined):
raise RuntimeError(f"{common_explanation} Variable '{name}' must also be initialized"
f" in the `{branch_name}` branch.")
if isinstance(output, variables.UndefinedReturnValue):
raise RuntimeError(f"{common_explanation} The `{branch_name}` branch must also have"
f" a return statement.")
class DaliOperatorOverload(_autograph.OperatorBase):
def detect_overload_ld(self, v):
return isinstance(v, _DataNode)
def ld(self, v):
branch_v = apply_conditional_split(v)
return branch_v
def detect_overload_if_stmt(self, cond):
return isinstance(cond, _DataNode)
def if_stmt(self, cond, body, orelse, get_state, set_state, symbol_names, nouts):
# Initial checkpoint before if
init_state = get_state()
with _cond_manager(cond) as split_predicate:
# Set the state for the body inputs, execute the body and collect the outputs.
# Verify if all outputs are initialized within the branch, split the outputs if they
# were just passed through, so they can be merged with the other branch.
with _cond_true():
body()
body_state = get_state()
_verify_branch_outputs(body_state, symbol_names, "if")
body_outputs = body_state[:nouts]
body_outputs = apply_conditional_split_to_branch_outputs(body_outputs)
# Do the same for else block.
set_state(init_state)
with _cond_false():
orelse()
orelse_state = get_state()
_verify_branch_outputs(orelse_state, symbol_names, "else")
orelse_outputs = orelse_state[:nouts]
orelse_outputs = apply_conditional_split_to_branch_outputs(orelse_outputs)
# Build the state that is the combination of both branches. Only the actual outputs
# should be affected by the if/else blocks, the rest can be reused from-before split.
output_values = []
# We execute the merge _after_ both branches and pretend, for a moment, that it
# can see the values produced in the child scopes.
with _cond_merge(split_predicate):
err_msg = ("Divergent data found in different branches of `if/else` control flow"
" statement. Variables in all code paths are merged into common output"
" batches. The values assigned to a given variable need to have the same"
" nesting structure in every code path (both `if` branches).\n"
"For example, if we define a variable as a tuple in one branch, it must"
" be defined as a tuple of the same length in the other branch - the"
" contents of the tuples may be different. If we define a variable as"
" a dictionary, the other branch must define it as a dictionary with the"
" same set of keys, the values may be different.\n")
try:
tree.assert_same_structure(body_outputs, orelse_outputs, check_types=True)
except ValueError as e:
# Suppress the original exception, add DALI explanation at the beginning,
# raise the full error message.
raise ValueError(err_msg + str(e)) from None
except TypeError as e:
raise TypeError(err_msg + str(e)) from None
def merge_branches(new_body_val, new_orelse_val):
logging.log(9, (f"{this_condition_stack()._indent()}[IF] Inserting merge"
f" at {this_condition_stack().stack_depth() -1}:"
f" merge({new_body_val}, {new_orelse_val}, predicate="
f"{split_predicate}."))
return fn._conditional.merge(new_body_val, new_orelse_val,
predicate=split_predicate)
output_values = tree.map_structure(merge_branches, body_outputs, orelse_outputs)
# Register the new nodes outside of the conditional scope, they will be used in subsequent
# calls.
this_condition_stack().register_data_nodes(output_values, False)
# No point in propagating the split/merged values that won't be read later.
output_values += init_state[nouts:]
set_state(output_values)
def detect_overload_not_(self, a):
return isinstance(a, _DataNode)
def not_(self, a):
# Not is eager (not lazy)
return fn._conditional.not_(a)
def detect_overload_lazy_and(self, a):
return isinstance(a, _DataNode)
def lazy_and(self, a_value, b):
# We proceed similarly to the `if` statement, but we don't have to trace branches and go back.
# Instead, we have one branch already evaluated and conditionally execute the other one.
# Effectively, we want `and_output = a_val and b` to be calculated as:
# if a_val:
# and_output = b()
# else:
# and_output = a_val
a_validated = fn._conditional.validate_logical(a_value, expression_name="and",
expression_side="left")
with _cond_manager(a_validated) as split_predicate:
with _cond_true():
b_value = b()
b_validated = fn._conditional.validate_logical(b_value, expression_name="and",
expression_side="right")
body_outputs = apply_conditional_split(b_validated)
with _cond_false():
else_outputs = apply_conditional_split(split_predicate)
with _cond_merge(split_predicate):
merged = fn._conditional.merge(body_outputs, else_outputs,
predicate=split_predicate)
this_condition_stack().register_data_nodes([merged], False)
return merged
def detect_overload_lazy_or(self, a):
return isinstance(a, _DataNode)
def lazy_or(self, a_value, b):
# To implement `or_output = a_val or b` we calculate it as:
# if a_val:
# or_output = a_val
# else:
# or_output = b()
a_validated = fn._conditional.validate_logical(a_value, expression_name="or",
expression_side="left")
with _cond_manager(a_validated) as split_predicate:
with _cond_true():
body_outputs = apply_conditional_split(split_predicate)
with _cond_false():
b_value = b()
b_validated = fn._conditional.validate_logical(b_value, expression_name="or",
expression_side="right")
else_outputs = apply_conditional_split(b_validated)
with _cond_merge(split_predicate):
merged = fn._conditional.merge(body_outputs, else_outputs,
predicate=split_predicate)
this_condition_stack().register_data_nodes([merged], False)
return merged
_OVERLOADS = DaliOperatorOverload()
_autograph.initialize_autograph(_OVERLOADS, convert_modules=["nvidia.dali.auto_aug"],
do_not_convert_modules=["nvidia.dali._autograph", "nvidia.dali"])
|
DALI-main
|
dali/python/nvidia/dali/_conditionals.py
|
# Copyright (c) 2017-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.backend_impl import (
Init, OpSpec, LoadLibrary, GetCudaVersion, GetCufftVersion, GetNppVersion, GetNvjpegVersion)
# TODO: Handle forwarding imports from backend_impl
from nvidia.dali.backend_impl import * # noqa: F401, F403
from . import __cuda_version__
import warnings
import os
import sys
# Note: If we ever need to add more complex functionality
# for importing the DALI C++ extensions, we can do it here
default_plugins = []
def deprecation_warning(what):
# show only this warning
with warnings.catch_warnings():
warnings.simplefilter("default")
warnings.warn(what, Warning, stacklevel=2)
initialized = False
if not initialized:
Init(OpSpec("CPUAllocator"), OpSpec("PinnedCPUAllocator"), OpSpec("GPUAllocator"))
initialized = True
# py311 warning
if sys.version_info[0] == 3 and sys.version_info[1] >= 11:
deprecation_warning("DALI support for Python {0}.{1} is experimental and some "
"functionalities may not work."
"".format(sys.version_info[0], sys.version_info[1]))
# py36 warning
if sys.version_info[0] == 3 and sys.version_info[1] == 6:
deprecation_warning("DALI 1.27 is the last release to support Python 3.6. "
"Please update your environment to use Python 3.7, 3.8, "
"3.9, 3.10, or (experimentally) 3.11.")
if int(str(__cuda_version__)[:2]) < 11:
deprecation_warning("DALI 1.21 is the last official release that supports CUDA 10.2. "
"Please update your environment to CUDA version 11 or newer.")
for lib in default_plugins:
LoadLibrary(os.path.join(os.path.dirname(__file__), lib))
cuda_checked = False
def check_cuda_runtime():
"""
Checks the availability of the CUDA runtime/GPU and the NPP, nvJPEG, and cuFFT libraries, and
prints an appropriate warning.
"""
global cuda_checked
if not cuda_checked:
cuda_checked = True
if GetCudaVersion() == -1:
deprecation_warning("GPU is not available. Only CPU operators are available.")
if GetCufftVersion() == -1:
deprecation_warning("nvidia-dali-cuda120 is no longer shipped with CUDA runtime. "
"You need to install it separately. cuFFT is typically "
"provided with CUDA Toolkit installation or an appropriate wheel. "
"Please check "
"https://docs.nvidia.com/cuda/cuda-quick-start-guide/index.html"
"#pip-wheels-installation-linux "
"for the reference.")
if GetNppVersion() == -1:
deprecation_warning("nvidia-dali-cuda120 is no longer shipped with CUDA runtime. "
"You need to install it separately. NPP is typically "
"provided with CUDA Toolkit installation or an appropriate wheel. "
"Please check "
"https://docs.nvidia.com/cuda/cuda-quick-start-guide/index.html"
"#pip-wheels-installation-linux "
"for the reference.")
if GetNvjpegVersion() == -1:
deprecation_warning("nvidia-dali-cuda120 is no longer shipped with CUDA runtime. "
"You need to install it separately. nvJPEG is typically "
"provided with CUDA Toolkit installation or an appropriate wheel. "
"Please check "
"https://docs.nvidia.com/cuda/cuda-quick-start-guide/index.html"
"#pip-wheels-installation-linux "
"for the reference.")
|
DALI-main
|
dali/python/nvidia/dali/backend.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import pickle
import sys
import types
import marshal
import importlib
def dummy_lambda():
pass
# Unfortunately, inspect.getclosurevars does not yield global names referenced by
# code syntactically nested inside the function; this includes nested functions
# and list comprehensions. For instance, for [exp1 for exp2 in exp3] occurring inside
# a function, references from exp1 would be omitted.
def get_global_references_from_nested_code(code, global_scope, global_refs):
for constant in code.co_consts:
if inspect.iscode(constant):
closure = tuple(types.CellType(None) for _ in range(len(constant.co_freevars)))
dummy_function = types.FunctionType(constant, global_scope, 'dummy_function',
closure=closure)
global_refs.update(inspect.getclosurevars(dummy_function).globals)
get_global_references_from_nested_code(constant, global_scope, global_refs)
def set_function_state(fun, state):
fun.__globals__.update(state['global_refs'])
fun.__defaults__ = state['defaults']
fun.__kwdefaults__ = state['kwdefaults']
def function_unpickle(name, qualname, code, closure):
code = marshal.loads(code)
global_scope = {'__builtins__': __builtins__}
fun = types.FunctionType(code, global_scope, name, closure=closure)
fun.__qualname__ = qualname
return fun
def function_by_value_reducer(fun):
cl_vars = inspect.getclosurevars(fun)
code = marshal.dumps(fun.__code__)
basic_def = (fun.__name__, fun.__qualname__, code, fun.__closure__)
global_refs = dict(cl_vars.globals)
get_global_references_from_nested_code(fun.__code__, fun.__globals__, global_refs)
fun_context = {
'global_refs': global_refs,
'defaults': fun.__defaults__,
'kwdefaults': fun.__kwdefaults__
}
return function_unpickle, basic_def, fun_context, None, None, set_function_state
def module_unpickle(name, origin, submodule_search_locations):
if name in sys.modules:
return sys.modules[name]
spec = importlib.util.spec_from_file_location(
name, origin, submodule_search_locations=submodule_search_locations)
module = importlib.util.module_from_spec(spec)
sys.modules[name] = module
spec.loader.exec_module(module)
return module
def module_reducer(module):
spec = module.__spec__
return module_unpickle, (spec.name, spec.origin, spec.submodule_search_locations)
def set_cell_state(cell, state):
cell.cell_contents = state['cell_contents']
def cell_unpickle():
return types.CellType(None)
def cell_reducer(cell):
return (cell_unpickle,
tuple(),
{'cell_contents': cell.cell_contents},
None,
None,
set_cell_state)
class DaliCallbackPickler(pickle.Pickler):
def reducer_override(self, obj):
if inspect.ismodule(obj):
return module_reducer(obj)
if isinstance(obj, types.CellType):
return cell_reducer(obj)
if inspect.isfunction(obj):
if isinstance(obj, type(dummy_lambda)) and obj.__name__ == dummy_lambda.__name__ or \
getattr(obj, '_dali_pickle_by_value', False):
return function_by_value_reducer(obj)
try:
pickle.dumps(obj)
except AttributeError as e:
if "Can't pickle local object" in str(e):
return function_by_value_reducer(obj)
except pickle.PicklingError as e:
if "it's not the same object as" in str(e):
return function_by_value_reducer(obj)
return NotImplemented
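def _example_pickle_local_callback():
    # Editor's usage sketch (illustrative): a local function cannot be pickled
    # by reference, so DaliCallbackPickler falls back to pickling it by value
    # (marshalled code plus captured globals). Protocol 5 is needed for the
    # 6-element reducer tuple returned by function_by_value_reducer.
    import io

    def local_cb(x):
        return x + 1

    buffer = io.BytesIO()
    DaliCallbackPickler(buffer, protocol=5).dump(local_cb)
    restored = pickle.loads(buffer.getvalue())
    assert restored(41) == 42
    return restored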
|
DALI-main
|
dali/python/nvidia/dali/reducers.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import math
import traceback
import warnings
from queue import Queue
import nvidia.dali.backend as _b
import nvidia.dali.ops as _ops
import nvidia.dali.pipeline as _pipeline
import nvidia.dali.tensors as _tensors
import nvidia.dali.types as _types
from nvidia.dali import _conditionals
from nvidia.dali._utils.eager_utils import _Classification, _transform_data_to_tensorlist
from nvidia.dali.data_node import DataNode as _DataNode, _check
from nvidia.dali.fn import _to_snake_case
from nvidia.dali._utils.external_source_impl import (
get_callback_from_source as _get_callback_from_source,
accepted_arg_count as _accepted_arg_count)
from nvidia.dali.ops._operator_utils import (_build_input_sets, _repack_output_sets)
class DataNodeDebug(_DataNode):
"""Wrapper class around Tensor, implementing all of the DataNode attributes."""
def __init__(self, data, name, device, source):
super().__init__(name, device, source)
self._data = data
def __str__(self):
indent = ' ' * 4
return f'DataNodeDebug(\n{indent}name="{self.name}",\n{indent}data=' \
+ f'{_tensors._tensorlist_to_string(self._data, indent + " " * 5)})'
__repr__ = __str__
def gpu(self):
if _conditionals.conditionals_enabled():
# Treat it the same way as regular operator would behave
[self_split], _ = _conditionals.apply_conditional_split_to_args([self], {})
data = self_split._data._as_gpu() if self.device == "cpu" else self_split._data
gpu_node = DataNodeDebug(data, self_split.name, "gpu", self_split.source)
_conditionals.register_data_nodes(gpu_node, [self])
return gpu_node
if self.device == 'gpu':
return self
return DataNodeDebug(self._data._as_gpu(), self.name, 'gpu', self.source)
def get(self):
return self._data
def shape(self):
return self._data.shape()
@staticmethod
def _arithm_op(*inputs, name=None):
return _PipelineDebug.current()._wrap_op_call(_ops.ArithmeticGenericOp,
DataNodeDebug._arithm_op_name,
*inputs,
name=name)
def __add__(self, other):
return self._arithm_op(self, other, name='add')
def __radd__(self, other):
return self._arithm_op(other, self, name='add')
def __sub__(self, other):
return self._arithm_op(self, other, name='sub')
def __rsub__(self, other):
return self._arithm_op(other, self, name='sub')
def __mul__(self, other):
return self._arithm_op(self, other, name='mul')
def __rmul__(self, other):
return self._arithm_op(other, self, name='mul')
def __pow__(self, other):
return self._arithm_op(self, other, name='pow')
def __rpow__(self, other):
return self._arithm_op(other, self, name='pow')
def __truediv__(self, other):
return self._arithm_op(self, other, name='fdiv')
def __rtruediv__(self, other):
return self._arithm_op(other, self, name='fdiv')
def __floordiv__(self, other):
return self._arithm_op(self, other, name='div')
def __rfloordiv__(self, other):
return self._arithm_op(other, self, name='div')
def __neg__(self):
return self._arithm_op(self, name='minus')
def __eq__(self, other):
return self._arithm_op(self, other, name='eq')
def __ne__(self, other):
return self._arithm_op(self, other, name='neq')
def __lt__(self, other):
return self._arithm_op(self, other, name='lt')
def __le__(self, other):
return self._arithm_op(self, other, name='leq')
def __gt__(self, other):
return self._arithm_op(self, other, name='gt')
def __ge__(self, other):
return self._arithm_op(self, other, name='geq')
def __and__(self, other):
return self._arithm_op(self, other, name='bitand')
def __rand__(self, other):
return self._arithm_op(other, self, name='bitand')
def __or__(self, other):
return self._arithm_op(self, other, name='bitor')
def __ror__(self, other):
return self._arithm_op(other, self, name='bitor')
def __xor__(self, other):
return self._arithm_op(self, other, name='bitxor')
def __rxor__(self, other):
return self._arithm_op(other, self, name='bitxor')
_arithm_op_name = _to_snake_case(_ops.ArithmeticGenericOp.__name__)
class _ExternalSourceDebug:
"""Debug mode version of ExternalSource operator."""
def __init__(
self, source=None, num_outputs=None, batch_size=-1, cycle=None, name=None, device='cpu',
device_id=-1, layout=None, batch=None, batch_info=None, **kwargs):
if name is not None and num_outputs is not None:
raise ValueError("`num_outputs` is not compatible with named `ExternalSource`")
callback, source_desc = _get_callback_from_source(source, cycle, batch_info or False)
self._name = name
self._layout = layout
self._num_outputs = num_outputs
self._batch = batch
self._batch_size = batch_size
self._callback = callback
self._device = device
self._device_id = device_id
self._source_desc = source_desc
self._batch_info = batch_info
self._current_iter = 0
self._current_sample = 0
self._feed_inputs = Queue()
if callback is not None:
arg_count = _accepted_arg_count(callback)
if arg_count not in [0, 1]:
raise TypeError("External source callback must be a callable with 0 or 1 argument")
self.accepts_arg = arg_count > 0
def _callback_args(self, idx_in_batch, epoch_idx):
if not self.accepts_arg:
return ()
if idx_in_batch is not None:
arg = _types.SampleInfo(self._current_sample + idx_in_batch, idx_in_batch,
self._current_iter, epoch_idx)
elif self._batch_info:
arg = _types.BatchInfo(self._current_iter, epoch_idx)
else:
arg = self._current_iter
return (arg, )
def _get_batch(self, epoch_idx):
try:
if self._batch:
callback_out = self._callback(*self._callback_args(None, epoch_idx))
else:
callback_out = [
self._callback(*self._callback_args(i, epoch_idx))
for i in range(self._batch_size)
]
self._current_sample += self._batch_size
self._current_iter += 1
except StopIteration:
self._current_iter = 0
self._current_sample = 0
raise
return callback_out
def _feed_input(self, data, kwargs):
if self._callback is not None:
raise RuntimeError(
f"Cannot use `feed_input` on the external source '{self._name}' with a `source`"
f" argument specified.")
self._feed_inputs.put((data, kwargs))
def _fetch(self, epoch_idx):
"""Fetches data from callback or provided with feed_input."""
def to_data_node_debug(data):
data = _transform_data_to_tensorlist(data, self._batch_size, layout, self._device_id)
if self._device == 'gpu' and isinstance(data, _tensors.TensorListCPU):
data = data._as_gpu()
elif self._device == 'cpu' and isinstance(data, _tensors.TensorListGPU):
data = data.as_cpu()
warnings.warn('Loading GPU-originated data into CPU ExternalSource operator is '
'discouraged and might be inefficient', Warning)
return DataNodeDebug(data, self._name, self._device, self._source_desc)
if self._callback is not None:
callback_out = self._get_batch(epoch_idx)
layout = self._layout
if self._num_outputs is not None:
raw_data = []
for idx in range(self._num_outputs):
if self._batch:
raw_data.append(callback_out[idx])
else:
raw_data.append([callback_out[i][idx] for i in range(self._batch_size)])
else:
raw_data = callback_out
else:
raw_data, feed_input_params = self._feed_inputs.get()
layout = feed_input_params.get('layout', self._layout)
if self._num_outputs is not None:
return [to_data_node_debug(data) for data in raw_data]
return to_data_node_debug(raw_data)
class _IterBatchInfo:
def __init__(self, size, source_context, non_uniform_batch=False):
"""Track information about the batch size within the iteration.
Parameters
----------
size : int
The detected batch size to be maintained in this iteration.
source_context
Source information about where the batch size was detected for better error reporting.
non_uniform_batch : bool, optional
Should be set to True if pure Split/Merge are used by hand and not as an implementation
of conditional operation. In that case there can be varying batch sizes within
the pipeline and we don't know how to track them, by default False
"""
self._size = size
self._source_context = source_context
self._non_uniform_batch = non_uniform_batch
@property
def size(self):
# We are not tracking the batch size in case of manual Split/Merge usage
if self._non_uniform_batch:
return -1
# If we are in conditional scope, try to get a DataNode, that will be a reference
# for the batch size with which to run the operators.
if _conditionals.conditionals_enabled():
cs = _conditionals.this_condition_stack()
batch = cs.scope_batch_size_tracker()
assert batch is None or isinstance(batch, DataNodeDebug), \
"Conditionals in debug mode work only with DataNodeDebug"
if batch is not None:
return len(batch.get())
return self._size
def reset(self):
self._size = -1
self._source_context = None
def mark_non_uniform_batch(self):
"""Mark the usage of Split operator that is not a part of the conditional statement
(the _if_stmt=True was not passed). It indicates that the batch tracking is no longer
possible as we cannot detect the conditional scopes.
"""
self._non_uniform_batch = True
def set_if_empty(self, size, context):
if self.size == -1:
self.__init__(size, context, self._non_uniform_batch)
return True
return False
def check_input(self, other_size, other_context, op_name, input_idx):
if not self.set_if_empty(other_size, other_context) and self.size != other_size:
raise RuntimeError(f"Batch size must be uniform across an iteration. "
f"Input {input_idx} for operator '{op_name}' has batch "
f"size = {other_size}. Expected batch size = {self.size}"
f"from:\n{self._source_context}")
def check_external_source(self, other_size, other_context, output_idx=-1):
if not self.set_if_empty(other_size, other_context) and self.size != other_size:
if self._source_context == other_context and output_idx > 0:
raise RuntimeError(
f"External source must return outputs with consistent batch size. "
f"Output {output_idx} has batch size = {other_size}, "
f"previous batch size = {self.size}")
else:
raise RuntimeError(
f"Batch size must be uniform across an iteration. External Source "
f"operator returned batch size: {other_size}, expected: {self.size}.\n"
f"If you want to use variable batch size (that is different batch size in "
f"each iteration) you must call all the external source operators at the "
f"beginning of your debug pipeline, before other DALI operators. "
f"All the external source operators are expected to return the same "
f"batch size in a given iteration, but it can change between the iterations. "
f"Other operators will use that batch size for processing.")
class _OperatorManager:
"""Utility class to manage single operator in the debug mode.
Uses :class:`ops.Operator` to create OpSpec and handle input sets.
"""
def __init__(self, op_class, op_name, pipe, source_context, next_logical_id, batch_size,
device_id, seed, inputs, kwargs):
"""Creates direct operator."""
self._batch_size = batch_size
self._separate_kwargs(kwargs)
if op_name == 'arithmetic_generic_op':
inputs = self._init_arithm_op(kwargs['name'], inputs)
# Save inputs classification for later verification.
self._inputs_classification = []
# When using input sets we have to create separate operators for each input.
input_set_len = -1
for i, input in enumerate(inputs):
classification = _Classification(input, f'Input {i}')
if isinstance(classification.is_batch, list):
if input_set_len == -1:
input_set_len = len(classification.is_batch)
elif input_set_len != len(classification.is_batch):
raise ValueError("All argument lists for Multiple Input Sets used "
f"with operator '{op_name}' must have the same length.")
self._inputs_classification.append(classification)
if _conditionals.conditionals_enabled():
if input_set_len != -1:
raise ValueError("Multiple input sets are not supported with conditional"
" execution (when `enable_conditionals=True`).")
self.expected_inputs_size = len(inputs)
if 'device' not in self._init_args and len(inputs) > 0:
self._init_args['device'] = self._inputs_classification[0].device
if 'seed' not in self._init_args:
self._init_args['seed'] = seed
self._device = self._init_args.get('device', 'cpu')
self._device_id = device_id
self._expected_inputs_size = len(inputs)
self.op_helper = op_class(**self._init_args)
self._op_name = op_name
self.op_spec = self.op_helper._spec
self._pipe = pipe
self._source_context = source_context
self.logical_ids = [
id for id in range(next_logical_id, next_logical_id + abs(input_set_len))
]
for i in range(len(inputs)):
self.op_spec.AddInput(op_name + f'[{i}]', self._inputs_classification[i].device)
for arg_name in self._call_args.keys():
# To use argument inputs OpSpec needs it specified (can be an empty placeholder).
self.op_spec.AddArgumentInput(arg_name, '')
if self.op_helper.schema_name == "_conditional__Split":
# TODO(klecki): Other than __repr__, there is no access to the OpSpec on Python side
# We need to check if this op is marked as `_if_stmt` or it is manually placed.
if "_if_stmt: True" not in repr(self.op_spec):
self._pipe._cur_iter_batch_info.mark_non_uniform_batch()
def _separate_kwargs(self, kwargs):
self._init_args = {}
self._call_args = {}
self._kwargs_classification = {}
for key, value in kwargs.items():
classification = _Classification(value, f'Argument {key}',
arg_constant_len=self._batch_size)
if classification.is_batch:
self._call_args[key] = classification.data
else:
self._init_args[key] = classification.data
self._kwargs_classification[key] = classification
def _init_arithm_op(self, name, inputs):
"""Fills arithmetic operator init arguments and returns inputs that are DataNodes."""
categories_idxs, data_nodes, integers, reals = _ops._group_inputs(inputs)
input_desc = _ops._generate_input_desc(categories_idxs, integers, reals)
self._init_args['device'] = _ops._choose_device(data_nodes)
self._init_args['expression_desc'] = f'{name}({input_desc})'
self._init_args['integer_constants'] = integers
self._init_args['real_constants'] = reals
return data_nodes
def _pack_to_data_node_debug(self, data, position=None):
if isinstance(data, (list, tuple)):
return [self._pack_to_data_node_debug(elem, pos) for pos, elem in enumerate(data)]
def position_to_suffix(position):
if position is None:
return ""
else:
return f"[{position}]"
return DataNodeDebug(data,
self._op_name + position_to_suffix(position),
'gpu' if isinstance(data, _tensors.TensorListGPU) else 'cpu',
self)
def _check_arg_len(self, expected_len, actual_len, args_type):
if expected_len != actual_len:
raise RuntimeError(
f"Trying to use operator '{self._op_name}' "
f"with different number of {args_type} than"
f" when it was built. Expected: {expected_len} {args_type}, got {actual_len}.")
def _check_device_classification(self, expected, actual, arg_type, value):
if expected != actual:
raise RuntimeError(
f"{arg_type} {value} for operator '{self._op_name}' is on '{actual}' "
f"but was on '{expected}' when created.")
def _check_batch_classification(self, expected_is_batch, actual_is_batch, arg_type, value):
def classification_to_str(is_batch):
return 'batch' if is_batch else 'constant'
if expected_is_batch != actual_is_batch:
expected_str = classification_to_str(expected_is_batch)
actual_str = classification_to_str(actual_is_batch)
raise RuntimeError(
f"{arg_type} {value} for operator '{self._op_name}' is a {actual_str} "
f"but was a {expected_str} when created.")
def _check_batch_size(self, classification, input_idx):
if isinstance(classification.is_batch, list):
# Checking for input set.
for input in classification.data:
self._pipe._cur_iter_batch_info.check_input(len(input),
self._source_context,
self._op_name,
input_idx)
else:
self._pipe._cur_iter_batch_info.check_input(len(classification.data),
self._source_context,
self._op_name,
input_idx)
def _check_call_arg_meta_data(self, expected_data, actual_data, arg_type, value):
""" Check for changes in layout, ndim and dtype.
Args:
expected_data: Expected value of the data.
actual_data: Actual value of the data.
arg_type (str): String representation of the argument type, e.g. 'Input', 'Argument'.
value (str): Argument name for keyword arguments and a number for inputs.
"""
def raise_err(meta_name, actual_value, expected_value):
raise RuntimeError(
f"{arg_type} {value} for operator '{self._op_name}' has "
f"{meta_name} = {actual_value}, expected: {expected_value}.")
expected_input_set = isinstance(expected_data, list)
if expected_input_set != isinstance(actual_data, list):
raise RuntimeError(f"{arg_type} {value} expected {'' if expected_input_set else 'not '}"
f"to be an input set.")
if isinstance(actual_data, list):
if len(actual_data) != len(expected_data):
raise RuntimeError(
f"{arg_type} {value} expected to be used as Multiple Input Set with "
f"length = {len(expected_data)}, but has length = {len(actual_data)}.")
# Checking input set.
for expected_elem, actual_elem in zip(expected_data, actual_data):
self._check_call_arg_meta_data(expected_elem, actual_elem, arg_type, value)
else:
if len(actual_data) == 0:
# Skip the validation on empty batches
return
if expected_data.layout() != actual_data.layout():
raise_err('layout', actual_data.layout(), expected_data.layout())
if expected_data.dtype != actual_data.dtype:
raise_err('dtype', actual_data.dtype, expected_data.dtype)
expected_ndim, actual_ndim = len(expected_data[0].shape()), len(actual_data[0].shape())
if expected_ndim != actual_ndim:
raise_err('ndim', actual_ndim, expected_ndim)
def _prep_input_sets(self, inputs):
inputs = list(inputs)
for i, input in enumerate(inputs):
# Transforming any convertible datatype to
# TensorList (DataNodeDebugs are already unpacked).
# Additionally accepting input sets, but only as list of TensorList.
if (not isinstance(input, (_tensors.TensorListCPU, _tensors.TensorListGPU))
and not (isinstance(input, list) and all([
isinstance(elem, (_tensors.TensorListCPU, _tensors.TensorListGPU))
for elem in input]))):
inputs[i] = _transform_data_to_tensorlist(
input, len(input), device_id=self._device_id)
return _build_input_sets(inputs, self._op_name)
def _update_classification(self, old_collection, position, new_classification):
"""Keeps the data classification up to date in case of running the conditional mode or
split and merge operations producing empty batches.
        Otherwise it is a no-op.
Parameters
----------
old_collection : list or dict
The old classification - list of input classification or dictionary of kwarg
classification
position : int or str
            The lookup key of the currently examined element in the `old_collection`
new_classification : _Classification
New classification of the input/kwarg
"""
# If the old classification was empty, it may be invalid due to the pass-through
        # behaviour on empty batches, so we need to update it with the new one.
if old_collection[position].is_batch and len(old_collection[position].data) == 0:
old_collection[position] = new_classification
return new_classification
return old_collection[position]
def run(self, inputs, kwargs):
"""Checks correctness of inputs and kwargs and runs the backend operator."""
self._check_arg_len(self._expected_inputs_size, len(inputs), 'inputs')
self._check_arg_len(len(self._kwargs_classification), len(kwargs), 'keyword arguments')
        # TODO(klecki): This will work only with DataNodes.
if _conditionals.conditionals_enabled():
inputs, kwargs = _conditionals.apply_conditional_split_to_args(inputs, kwargs)
input_data_nodes_bkp = inputs
call_args = {}
inputs = list(inputs)
# Check inputs classification as batches and extract data from DataNodeDebugs.
for i, (input,
expected_classification) in enumerate(zip(inputs, self._inputs_classification)):
classification = _Classification(input, f'Input {i}')
expected_classification = self._update_classification(self._inputs_classification, i,
classification)
self._check_batch_classification(expected_classification.is_batch,
classification.is_batch,
'Input',
i)
self._check_device_classification(expected_classification.device,
classification.device,
'Input',
i)
if classification.is_batch:
if self.op_helper.schema_name != "_conditional__Merge":
self._check_batch_size(classification, i)
self._check_call_arg_meta_data(
expected_classification.data, classification.data, 'Input', i)
if classification.device != ('gpu' if self._device == 'gpu' else 'cpu'):
raise RuntimeError(
f"Cannot call {self._device.upper()} operator '{self._op_name}' with "
f"{classification.device.upper()} input {i}.")
inputs[i] = classification.data
input_sets = self._prep_input_sets(inputs)
# Check kwargs classification as batches and setup call args.
for key, value in kwargs.items():
classification = _Classification(value, f'Argument {key}',
arg_constant_len=self._batch_size)
self._update_classification(self._kwargs_classification, key, classification)
self._check_batch_classification(self._kwargs_classification[key].is_batch,
classification.is_batch, 'Argument', key)
self._check_device_classification(self._kwargs_classification[key].device,
classification.device, 'Argument', key)
if not classification.is_batch and classification.data != self._init_args[key] and \
not (math.isnan(classification.data) and math.isnan(self._init_args[key])):
raise RuntimeError(
f"Argument '{key}' for operator '{self._op_name}' unexpectedly changed"
f" value from '{self._init_args[key]}' to '{classification.data}'")
if classification.is_batch:
self._check_call_arg_meta_data(self._kwargs_classification[key].data,
classification.data, 'Argument', key)
call_args[key] = classification.data
if _conditionals.conditionals_enabled():
            # Check whether we managed to successfully extract an input batch even though
            # the input was not directly produced by DALI.
# TODO(klecki): Add better handling of constant nodes for conditionals in debug mode.
for i, classification in enumerate(self._inputs_classification):
if classification.is_batch and not classification.was_data_node:
raise ValueError(f"Debug mode with conditional execution"
f" (when `enable_conditionals=True`) doesn't allow for"
f" modification of operator outputs by libraries other than"
f" DALI or using the TensorLists extracted via `.get()` as"
f" inputs. Expected `DataNodeDebug` as an input, got"
f" {type(classification.original)} at input {i}.")
for key, classification in self._kwargs_classification.items():
if classification.is_batch and not classification.was_data_node:
raise ValueError(f"Debug mode with conditional execution"
f" (when `enable_conditionals=True`) doesn't allow for"
f" modification of operator outputs by libraries other than"
f" DALI or using the TensorLists extracted via `.get()` as"
f" inputs. Expected `DataNodeDebug` as an input, got"
f" {type(classification.original)} for argument '{key}'.")
res = [
self._pipe._run_op_on_device(self._op_name, logical_id, self._device, input, call_args)
for input, logical_id in zip(input_sets, self.logical_ids)
]
# Set iteration batch size if it wasn't set already.
self._pipe._cur_iter_batch_info.set_if_empty(len(res[0][0]), self._source_context)
if len(res) == 1:
res = self._pack_to_data_node_debug(res[0])
else:
res = _repack_output_sets(res)
res = self._pack_to_data_node_debug(res)
if _conditionals.conditionals_enabled():
            # Work with the unprocessed input DataNodes, so we can detect which scope we should go to.
_conditionals.register_data_nodes(res, input_data_nodes_bkp, kwargs)
if self.op_helper.schema_name != "_conditional__Split":
res = list(_conditionals.apply_conditional_split_to_branch_outputs(res))
if len(res) == 1:
return res[0]
return res
class _PipelineDebug(_pipeline.Pipeline):
"""Debug mode for pipeline. Allows access to data inside the pipeline execution."""
def __init__(self, exec_func, **kwargs):
super().__init__(**kwargs)
self._debug_on = False
self._external_sources = {}
self._feed_input_data = {}
self._exec_func = exec_func
self._cur_operator_id = -1
self._next_logical_id = 0
self._seed_upper_bound = (1 << 31) - 1
self._operators = {}
self._operators_built = False
self._cur_iter_batch_info = _IterBatchInfo(-1, None) # Used for variable batch sizes.
device_id = self._device_id if self._device_id is not None else _types.CPU_ONLY_DEVICE_ID
self._pipe = _b.PipelineDebug(self._max_batch_size, self._num_threads, device_id,
self._set_affinity)
import numpy as np
seed = kwargs.get('seed', -1)
if seed < 0:
seed = np.random.randint(self._seed_upper_bound)
self._seed_generator = np.random.default_rng(seed)
def __enter__(self):
raise RuntimeError(
"Currently pipeline in debug mode works only with `pipeline_def` decorator."
"Using `with` statement is not supported.")
def build(self):
"""Build the pipeline.
Symbolic version of build from the standard pipeline.
In debug mode operators are built during
the first run of the pipeline.
Refer to :meth:`Pipeline.build() <nvidia.dali.Pipeline.build>` for details."""
self._built = True
def run(self):
"""Run the pipeline and return the result."""
import numpy as np
if not self._built:
raise RuntimeError('Pipeline must be built first.')
self._debug_on = True
self._cur_operator_id = -1
self._cur_iter_batch_info.reset()
_pipeline.Pipeline.push_current(self)
res = self._exec_func()
if res is None:
res = ()
elif not isinstance(res, tuple):
res = (res, )
self._debug_on = False
if not self._operators_built:
self._operators_built = True
_pipeline.Pipeline.pop_current()
# Transforming all variables to TensorLists.
outputs = []
for i, val in enumerate(res):
if isinstance(val, DataNodeDebug):
outputs.append(val.get())
elif isinstance(val, (list, tuple)):
raise TypeError(
                    f'Illegal pipeline output type. '
                    f'The output {i} contains a nested `DataNodeDebug`.')
else:
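                # Broadcast the constant value max_batch_size times to form a full batch.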
outputs.append(
_tensors.TensorListCPU(
np.tile(val, (self._max_batch_size, *[1] * np.array(val).ndim))))
# Reset the stack, so we retrace for the next iteration
self._condition_stack = _conditionals._ConditionStack()
return tuple(outputs)
def feed_input(self, data_node, data, **kwargs):
"""Pass data to an ExternalSource operator inside the pipeline.
Refer to :meth:`Pipeline.feed_input() <nvidia.dali.Pipeline.feed_input>` for details."""
if not self._built:
raise RuntimeError("Pipeline must be built first.")
if isinstance(data_node, str):
name = data_node
else:
_check(data_node)
name = data_node.name
if name not in self._external_sources:
# Saving data, because pipeline hasn't been run yet.
if name not in self._feed_input_data:
self._feed_input_data[name] = []
self._feed_input_data[name].append((data, kwargs))
else:
self._external_sources[name]._feed_input(name, data, kwargs)
def _create_op(self, op_class, op_name, key, cur_context, inputs, kwargs):
"""Creates direct operator."""
self._operators[key] = _OperatorManager(
op_class, op_name, self, cur_context, self._next_logical_id, self._max_batch_size,
self._device_id, self._seed_generator.integers(self._seed_upper_bound), inputs, kwargs)
self._pipe.AddMultipleOperators(self._operators[key].op_spec,
self._operators[key].logical_ids)
self._next_logical_id = self._operators[key].logical_ids[-1] + 1
def _check_external_source_batch_size(self, data, cur_context):
if isinstance(data, list):
for i, output in enumerate(data):
self._cur_iter_batch_info.check_external_source(len(output.get()), cur_context, i)
else:
self._cur_iter_batch_info.check_external_source(len(data.get()), cur_context)
def _external_source(self, name=None, **kwargs):
self._cur_operator_id += 1
cur_frame = inspect.currentframe().f_back.f_back
key = inspect.getframeinfo(cur_frame)[:3] + (self._cur_operator_id, )
if not self._operators_built:
es = _ExternalSourceDebug(batch_size=self._max_batch_size,
device_id=self._device_id, name=name, **kwargs)
# feed_input all data collected after build and before run
for (data, fi_kwargs) in self._feed_input_data.pop(name, []):
es._feed_input(data, fi_kwargs)
self._external_sources[key] = es
if key in self._external_sources:
data = self._external_sources[key]._fetch(self._epoch_idx)
self._check_external_source_batch_size(
data, ''.join(traceback.format_stack(cur_frame, limit=1)))
return data
else:
raise RuntimeError("Unexpected operator 'ExternalSource'. Debug mode does not support"
" changing the order of operators executed within the pipeline.")
def _run_op_on_device(self, op_name, logical_id, device, inputs, kwargs):
# TODO(klecki): Readers are not compatible with requesting batch size, request batch = -1
# for anything without input
def is_converted_to_batch(elem):
            return isinstance(elem, (_tensors.TensorListCPU, _tensors.TensorListGPU))
batch_input = any(is_converted_to_batch(input) for input in inputs)
batch_input = batch_input or any(is_converted_to_batch(arg) for _, arg in kwargs.items())
if batch_input:
requested_size = self._cur_iter_batch_info.size
else:
requested_size = -1
if device == 'gpu':
return self._pipe.RunOperatorGPU(logical_id, inputs, kwargs, requested_size)
if device == 'cpu':
return self._pipe.RunOperatorCPU(logical_id, inputs, kwargs, requested_size)
if device == 'mixed':
return self._pipe.RunOperatorMixed(logical_id, inputs, kwargs, requested_size)
raise ValueError(f"Unknown device: '{device}' in operator '{op_name}'.")
def _run_op(self, op_helper, inputs, kwargs):
return op_helper.run(inputs, kwargs)
@staticmethod
def _extract_data_node_inputs(inputs):
"""
        Extracts DataNodeDebugs from inputs for an arithmetic operator and transfers
        the data to the GPU if needed.
"""
data_nodes = []
to_gpu = any(
[input.device == 'gpu' for input in inputs if isinstance(input, DataNodeDebug)])
for input in inputs:
if isinstance(input, DataNodeDebug):
if to_gpu and input.device != 'gpu':
data_nodes.append(input.gpu())
else:
data_nodes.append(input)
return data_nodes
def _wrap_op_call(self, op_class, op_name, *inputs, **kwargs):
self._cur_operator_id += 1
cur_frame = inspect.currentframe().f_back.f_back
cur_context = ''.join(traceback.format_stack(cur_frame, limit=1))
key = inspect.getframeinfo(cur_frame)[:3] + (self._cur_operator_id, )
if not self._operators_built:
self._create_op(op_class, op_name, key, cur_context, inputs, kwargs)
if key in self._operators:
if op_name == 'arithmetic_generic_op':
inputs = _PipelineDebug._extract_data_node_inputs(inputs)
return self._run_op(self._operators[key], inputs, kwargs)
else:
raise RuntimeError(f"Unexpected operator '{op_name}'. Debug mode does not support"
" changing the order of operators executed within the pipeline.")
|
DALI-main
|
dali/python/nvidia/dali/_debug_mode.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-name-in-module, unused-import
from nvidia.dali.backend import TensorCPU, TensorListCPU, TensorGPU, TensorListGPU # noqa: F401
def _transfer_to_cpu(data, device):
if device.lower() == 'gpu':
return data.as_cpu()
return data
def _join_string(data, crop, edgeitems, sep=', '):
if crop:
data = data[:edgeitems] + ['...'] + data[-edgeitems:]
return sep.join(data)
def _tensor_to_string(self):
""" Returns string representation of Tensor."""
import numpy as np
type_name = type(self).__name__
indent = ' ' * 4
layout = self.layout()
data = np.array(_transfer_to_cpu(self, type_name[-3:]))
data_str = np.array2string(data, prefix=indent, edgeitems=2)
params = [f'{type_name}(\n{indent}{data_str}', f'dtype={self.dtype}'] + \
([f'layout={layout}'] if layout else []) + \
[f'shape={self.shape()})']
return _join_string(params, False, 0, ',\n' + indent)
def _tensorlist_to_string(self, indent=''):
""" Returns string representation of TensorList."""
import numpy as np
edgeitems = 2
spaces_indent = indent + ' ' * 4
type_name = type(self).__name__
layout = self.layout()
data = _transfer_to_cpu(self, type_name[-3:])
data_str = '[]'
crop = False
if data:
if data.is_dense_tensor():
data_str = np.array2string(np.array(data.as_tensor()),
prefix=spaces_indent, edgeitems=edgeitems)
else:
data = list(map(np.array, data))
# Triggers summarization if total number of elements exceeds 1000
# (empty tensor is treated as 1 element).
crop = len(data) > 2 * edgeitems + 1 and sum(max(arr.size, 1) for arr in data) > 1000
if crop:
data = data[:edgeitems] + data[-edgeitems:]
# Separator matching numpy standard.
sep = '\n' * data[0].ndim + spaces_indent
data = [np.array2string(tensor, prefix=spaces_indent, edgeitems=edgeitems)
for tensor in data]
data_str = f'[{_join_string(data, crop, edgeitems, sep)}]'
shape = self.shape()
shape_len = len(shape)
shape_prefix = 'shape=['
shape_crop = shape_len > 16 or (shape_len > 2 * edgeitems + 1 and
shape_len * len(shape[0]) > 100)
shape = list(map(str, shape))
shape_str = _join_string(shape, shape_crop, edgeitems)
if len(shape_str) > 75:
# Break shapes into separate lines.
shape_str = _join_string(shape, shape_crop, edgeitems, ', \n' +
spaces_indent + ' ' * len(shape_prefix))
params = [f'{type_name}(\n{spaces_indent}{data_str}', f'dtype={self.dtype}'] + \
([f'layout="{layout}"'] if layout else []) + \
[f'num_samples={len(self)}', f'{shape_prefix}{shape_str}])']
return _join_string(params, False, 0, ',\n' + spaces_indent)
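# Usage sketch (added to this excerpt, not in the original file): the helper can
# be exercised directly on a TensorListCPU built from a NumPy array; elsewhere it
# is expected to back the `__str__`/`__repr__` of the backend tensor classes.
def _example_tensorlist_to_string():
    import numpy as np
    batch = TensorListCPU(np.zeros((2, 3, 4), dtype=np.uint8), "HW")
    # Renders the data along with dtype, layout, num_samples and per-sample shapes.
    return _tensorlist_to_string(batch)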
|
DALI-main
|
dali/python/nvidia/dali/tensors.py
|
# Copyright (c) 2017-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-name-in-module,unused-import
from enum import Enum, unique
import re
from nvidia.dali.backend_impl.types import DALIDataType, DALIImageType, DALIInterpType
# TODO: Handle forwarding imports from backend_impl
from nvidia.dali.backend_impl.types import * # noqa: F401, F403
try:
from nvidia.dali import tfrecord as tfrec
_tfrecord_support = True
except ImportError:
_tfrecord_support = False
def _to_list(func):
def _to_list_instance(val):
if isinstance(val, (list, tuple)):
return [func(v) for v in val]
else:
return [func(val)]
return _to_list_instance
def _not_implemented(val):
raise NotImplementedError()
_known_types = {
DALIDataType.INT8: ("int", int),
DALIDataType.INT16: ("int", int),
DALIDataType.INT32: ("int", int),
DALIDataType.INT64: ("int", int),
DALIDataType.UINT8: ("int", int),
DALIDataType.UINT16: ("int", int),
DALIDataType.UINT32: ("int", int),
# DALIDataType.UINT64: ("int", int), # everything else fits into the Python int
DALIDataType.FLOAT: ("float", float),
DALIDataType.BOOL: ("bool", bool),
DALIDataType.STRING: ("str", str),
DALIDataType._BOOL_VEC: ("bool", _to_list(bool)),
DALIDataType._INT32_VEC: ("int", _to_list(int)),
DALIDataType._STRING_VEC: ("str", _to_list(str)),
DALIDataType._FLOAT_VEC: ("float", _to_list(float)),
DALIDataType.IMAGE_TYPE: ("nvidia.dali.types.DALIImageType", lambda x: DALIImageType(int(x))),
DALIDataType.DATA_TYPE: ("nvidia.dali.types.DALIDataType", lambda x: DALIDataType(int(x))),
DALIDataType.INTERP_TYPE:
("nvidia.dali.types.DALIInterpType", lambda x: DALIInterpType(int(x))),
DALIDataType.TENSOR_LAYOUT: (":ref:`layout str<layout_str_doc>`", lambda x: str(x)),
DALIDataType.PYTHON_OBJECT: ("object", lambda x: x),
DALIDataType._TENSOR_LAYOUT_VEC:
(":ref:`layout str<layout_str_doc>`", _to_list(lambda x: str(x))),
DALIDataType._DATA_TYPE_VEC: ("nvidia.dali.types.DALIDataType",
_to_list(lambda x: DALIDataType(int(x))))
}
_vector_types = {
DALIDataType._BOOL_VEC: DALIDataType.BOOL,
DALIDataType._INT32_VEC: DALIDataType.INT32,
DALIDataType._STRING_VEC: DALIDataType.STRING,
DALIDataType._FLOAT_VEC: DALIDataType.FLOAT,
DALIDataType._TENSOR_LAYOUT_VEC: DALIDataType.TENSOR_LAYOUT,
DALIDataType._DATA_TYPE_VEC: DALIDataType.DATA_TYPE
}
if _tfrecord_support:
_known_types[DALIDataType.FEATURE] = ("nvidia.dali.tfrecord.Feature", tfrec.Feature)
_known_types[DALIDataType._FEATURE_VEC] = ("nvidia.dali.tfrecord.Feature or "
"list of nvidia.dali.tfrecord.Feature",
_to_list(tfrec.Feature))
_known_types[DALIDataType._FEATURE_DICT] = ("dict of (string, nvidia.dali.tfrecord.Feature)",
_not_implemented)
def _type_name_convert_to_string(dtype, allow_tensors):
if dtype in _known_types:
type_name = _known_types[dtype][0]
if dtype in _enum_types:
type_name = f":class:`{type_name}`"
ret = type_name
if dtype in _vector_types:
ret += " or list of " + type_name
if allow_tensors:
ret += " or TensorList of " + type_name
return ret
else:
raise RuntimeError(str(dtype) + " does not correspond to a known type.")
def _type_convert_value(dtype, val):
if dtype not in _known_types:
raise RuntimeError(str(dtype) + " does not correspond to a known type.")
return _known_types[dtype][1](val)
def _vector_element_type(dtype):
if dtype not in _vector_types:
raise RuntimeError(str(dtype) + " is not a vector type.")
return _vector_types[dtype]
def _default_converter(dtype, default_value):
if dtype in _enum_types:
return str(_type_convert_value(dtype, default_value))
else:
return repr(_type_convert_value(dtype, default_value))
# avoid importing NumPy if to_numpy_type is not called to break strong NumPy dependency
_numpy_types = None
def to_numpy_type(dali_type):
"""
Converts DALIDataType to NumPy type
Args
----
dali_type: DALIDataType
Input type to convert
"""
import numpy as np
global _numpy_types
if _numpy_types is None:
_numpy_types = {
DALIDataType.UINT8: np.uint8,
DALIDataType.UINT16: np.uint16,
DALIDataType.UINT32: np.uint32,
DALIDataType.UINT64: np.uint64,
DALIDataType.INT8: np.int8,
DALIDataType.INT16: np.int16,
DALIDataType.INT32: np.int32,
DALIDataType.INT64: np.int64,
DALIDataType.FLOAT16: np.float16,
DALIDataType.FLOAT: np.float32,
DALIDataType.FLOAT64: np.float64,
DALIDataType.BOOL: np.bool_
}
return _numpy_types[dali_type]
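# Minimal usage sketch (not in the original file): the mapping is built lazily on
# the first call, so merely importing this module does not pull in NumPy.
def _example_to_numpy_type():
    import numpy as np
    assert to_numpy_type(DALIDataType.FLOAT) is np.float32
    assert to_numpy_type(DALIDataType.UINT8) is np.uint8
    return to_numpy_type(DALIDataType.INT64)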
@unique
class PipelineAPIType(Enum):
"""Pipeline API type
"""
BASIC = 0
ITERATOR = 1
SCHEDULED = 2
class CUDAStream:
"""Wrapper class for a CUDA stream."""
def __init__(self, ptr=0):
self._ptr = ptr
@property
def ptr(self):
"""Raw CUDA stream pointer, stored as uint64."""
return self._ptr
_bool_types = [DALIDataType.BOOL]
_int_types = [
DALIDataType.INT8, DALIDataType.INT16, DALIDataType.INT32, DALIDataType.INT64,
DALIDataType.UINT8, DALIDataType.UINT16, DALIDataType.UINT32, DALIDataType.UINT64
]
_float_types = [DALIDataType.FLOAT16, DALIDataType.FLOAT, DALIDataType.FLOAT64]
_int_like_types = _bool_types + _int_types
_all_types = _bool_types + _int_types + _float_types
_enum_types = [DALIDataType.IMAGE_TYPE, DALIDataType.DATA_TYPE, DALIDataType.INTERP_TYPE]
class ScalarConstant(object):
"""
.. note::
This class should not be instantiated directly; use :func:`Constant` function
with appropriate arguments to create instances of this class.
Wrapper for a constant value that can be used in DALI :ref:`mathematical expressions`
and applied element-wise to the results of DALI Operators representing Tensors in
:meth:`nvidia.dali.Pipeline.define_graph` step.
    ScalarConstant indicates the type the value should be treated as with respect
    to type promotions. The actual values passed to the backend from Python
would be `int32` for integer values and `float32` for floating point values.
Python builtin types `bool`, `int` and `float` will be marked to indicate
:const:`nvidia.dali.types.DALIDataType.BOOL`, :const:`nvidia.dali.types.DALIDataType.INT32`,
and :const:`nvidia.dali.types.DALIDataType.FLOAT` respectively.
Args
----
value: bool or int or float
The constant value to be passed to DALI expression.
dtype: DALIDataType, optional
Target type of the constant to be used in types promotions.
"""
def __init__(self, value, dtype=None):
self.shape = []
value_dtype = getattr(value, "dtype", None) # handle 0D tensors and numpy scalars
if value_dtype is not None:
dali_type = to_dali_type(value.dtype)
if dali_type in _int_types:
value = int(value)
elif dali_type in _float_types:
value = float(value)
elif dali_type in _bool_types:
value = bool(value)
if dtype is None:
dtype = dali_type
if not isinstance(value, (bool, int, float)):
raise TypeError(
f"Expected scalar value of type 'bool', 'int' or 'float', got {type(value)}.")
if dtype:
self.dtype = dtype
if self.dtype in _bool_types:
self.value = bool(value)
elif self.dtype in _int_types:
self.value = int(value)
elif self.dtype in _float_types:
self.value = float(value)
else:
raise TypeError(
f"DALI ScalarConstant can only hold one of: {_all_types} types.")
elif isinstance(value, bool):
self.value = value
self.dtype = DALIDataType.BOOL
elif isinstance(value, int):
self.value = value
self.dtype = DALIDataType.INT32
elif isinstance(value, float):
self.value = value
self.dtype = DALIDataType.FLOAT
def bool(self):
return ScalarConstant(self.value, DALIDataType.BOOL)
def int8(self):
return ScalarConstant(self.value, DALIDataType.INT8)
def int16(self):
return ScalarConstant(self.value, DALIDataType.INT16)
def int32(self):
return ScalarConstant(self.value, DALIDataType.INT32)
def int64(self):
return ScalarConstant(self.value, DALIDataType.INT64)
def uint8(self):
return ScalarConstant(self.value, DALIDataType.UINT8)
def uint16(self):
return ScalarConstant(self.value, DALIDataType.UINT16)
def uint32(self):
return ScalarConstant(self.value, DALIDataType.UINT32)
def uint64(self):
return ScalarConstant(self.value, DALIDataType.UINT64)
def float16(self):
return ScalarConstant(self.value, DALIDataType.FLOAT16)
def float32(self):
return ScalarConstant(self.value, DALIDataType.FLOAT)
def float64(self):
return ScalarConstant(self.value, DALIDataType.FLOAT64)
def __eq__(self, other):
if isinstance(other, ScalarConstant):
return self.value == other.value and self.dtype == other.dtype
# Delegate the call to the `__eq__` of other object, most probably a `DataNode`
return other.__eq__(self)
def __ne__(self, other):
if isinstance(other, ScalarConstant):
return self.value != other.value or self.dtype != other.dtype
# Delegate the call to the `__ne__` of other object, most probably a `DataNode`
return other.__ne__(self)
def __bool__(self):
if self.dtype in _int_like_types:
return bool(self.value)
raise TypeError(f"DALI ScalarConstant must be converted to one of bool or int types: "
f"({_int_like_types}) explicitly before casting to builtin `bool`.")
def __int__(self):
if self.dtype in _int_like_types:
return int(self.value)
raise TypeError(f"DALI ScalarConstant must be converted to one of bool or int types: "
f"({_int_like_types}) explicitly before casting to builtin `int`.")
def __float__(self):
if self.dtype in _float_types:
return self.value
raise TypeError(f"DALI ScalarConstant must be converted to one of the float types: "
f"({_float_types}) explicitly before casting to builtin `float`.")
def __str__(self):
return "{}:{}".format(self.value, self.dtype)
def __repr__(self):
return "{}".format(self.value)
def _is_scalar_shape(shape):
return shape is None or shape == () or shape == [] or shape == 1 or \
shape == [1] or shape == (1,) # legacy pseudo-scalars
def _is_true_scalar(value):
return len(getattr(value, "shape", ())) == 0
def _is_mxnet_array(value):
return 'mxnet.ndarray.ndarray.NDArray' in str(type(value))
def _is_torch_tensor(value):
return 'torch.Tensor' in str(type(value))
def _is_numpy_array(value):
type_name = str(type(value))
return 'numpy.ndarray' in type_name or \
'numpy.int' in type_name or \
'numpy.uint' in type_name or \
'numpy.float' in type_name
def _raw_cuda_stream(stream_obj):
if stream_obj is None:
return None
elif hasattr(stream_obj, "cuda_stream"): # torch
return stream_obj.cuda_stream
elif hasattr(stream_obj, "ptr"): # cupy
return stream_obj.ptr
else:
return stream_obj
def _get_default_stream_for_array(array):
if isinstance(array, list) and len(array):
array = array[0]
if _is_torch_tensor(array):
import torch
return _raw_cuda_stream(torch.cuda.current_stream())
elif _is_cupy_array(array):
import cupy
return _raw_cuda_stream(cupy.cuda.get_current_stream())
else:
return None
def _get_device_id_for_array(array):
if isinstance(array, list) and len(array):
array = array[0]
if _is_torch_tensor(array):
return array.device.index
elif _is_cupy_array(array):
return array.device
elif _is_mxnet_array(array):
return array.context.device_id
else:
return None
_cupy_array_type_regex = re.compile('.*cupy.*\..*ndarray.*') # noqa: W605
def _is_cupy_array(value):
return _cupy_array_type_regex.match(str(type(value)))
# common type names used by numpy, torch and possibly other frameworks
_type_name_to_dali_type = {
'bool': DALIDataType.BOOL,
'boolean': DALIDataType.BOOL,
'int8': DALIDataType.INT8,
'sbyte': DALIDataType.INT8,
'uint8': DALIDataType.UINT8,
'byte': DALIDataType.UINT8,
'ubyte': DALIDataType.UINT8,
'int16': DALIDataType.INT16,
'short': DALIDataType.INT16,
'uint16': DALIDataType.UINT16,
'ushort': DALIDataType.UINT16,
'int32': DALIDataType.INT32,
'uint32': DALIDataType.UINT32,
'int64': DALIDataType.INT64,
'long': DALIDataType.INT64,
'uint64': DALIDataType.UINT64,
'ulong': DALIDataType.UINT64,
'half': DALIDataType.FLOAT16,
'float16': DALIDataType.FLOAT16,
'float': DALIDataType.FLOAT,
'float32': DALIDataType.FLOAT,
'float64': DALIDataType.FLOAT64,
'double': DALIDataType.FLOAT64,
}
dali_type_converters = []
def to_dali_type(framework_type):
t = str(framework_type)
if t.startswith('torch.'):
t = t[6:]
t = _type_name_to_dali_type.get(t)
if t is None:
raise TypeError(f"'{framework_type}' could not be converted into any known DALIDataType.")
return t
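# Usage sketch (not in the original file): dtypes are matched by their string
# form, which lets NumPy and Torch dtypes share a single lookup table.
def _example_to_dali_type():
    import numpy as np
    assert to_dali_type(np.dtype(np.float32)) == DALIDataType.FLOAT
    assert to_dali_type(np.dtype(np.uint8)) == DALIDataType.UINT8
    # A 'torch.float16' dtype would have its 'torch.' prefix stripped -> FLOAT16.
    return to_dali_type(np.dtype(np.int64))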
def _is_compatible_array_type(value):
return _is_numpy_array(value) or _is_mxnet_array(value) or _is_torch_tensor(value)
def _preprocess_constant_array_type(value):
if _is_mxnet_array(value):
# mxnet ndarray is not directly compatible with numpy.ndarray, but provides conversion
value = value.asnumpy()
if _is_numpy_array(value):
import numpy as np
# 64-bit types require explicit dtype
if value.dtype == np.float64:
value = value.astype(np.float32)
if value.dtype == np.int64:
value = value.astype(np.int32)
if value.dtype == np.uint64:
value = value.astype(np.uint32)
return value
def ConstantNode(device, value, dtype, shape, layout, **kwargs):
data = value
if _is_compatible_array_type(value):
value = _preprocess_constant_array_type(value)
# At this point value is a numpy array or a torch tensor. They have very similar API
actual_type = to_dali_type(value.dtype)
if dtype is None:
dtype = actual_type
if shape is not None:
value = value.reshape(shape)
else:
shape = list(value.shape) # torch uses torch.Size instead of list
data = value.flatten().tolist()
else:
def _type_from_value_or_list(v):
if not isinstance(v, (list, tuple)):
v = [v]
has_floats = False
has_ints = False
has_bools = False
for x in v:
if isinstance(x, float):
has_floats = True
elif isinstance(x, bool):
has_bools = True
elif isinstance(x, int):
has_ints = True
else:
raise TypeError("Unexpected type: " + str(type(x)))
if has_floats:
return DALIDataType.FLOAT
if has_ints:
return DALIDataType.INT32
if has_bools:
return DALIDataType.BOOL
# empty list defaults to float
return DALIDataType.FLOAT
actual_type = _type_from_value_or_list(value)
if dtype is None:
dtype = actual_type
import nvidia.dali.fn as fn
def _convert(x, type):
if isinstance(x, (list, tuple)):
return [type(y) for y in x]
return type(x)
isint = actual_type in _int_like_types
idata = _convert(data, int) if isint else None
fdata = None if isint else data
if device is None:
device = "cpu"
return fn.constant(device=device, fdata=fdata, idata=idata, shape=shape, dtype=dtype,
layout=layout, **kwargs)
def _is_scalar_value(value):
if value is None:
return True
if isinstance(value, (bool, int, float)):
return True
return not _is_compatible_array_type(value) or _is_scalar_shape(value.shape)
def Constant(value, dtype=None, shape=None, layout=None, device=None, **kwargs):
"""Wraps a constant value which can then be used in
:meth:`nvidia.dali.Pipeline.define_graph` pipeline definition step.
If the `value` argument is a scalar and neither `shape`, `layout` nor
`device` is provided, the function will return a :class:`ScalarConstant`
wrapper object, which receives special, optimized treatment when used in
:ref:`mathematical expressions`.
Otherwise, the function creates a `dali.ops.Constant` node, which produces
a batch of constant tensors.
Args
----
value: `bool`, `int`, `float`, a `list` or `tuple` thereof or a `numpy.ndarray`
The constant value to wrap. If it is a scalar, it can be used as scalar
value in mathematical expressions. Otherwise, it will produce a constant
tensor node (optionally reshaped according to `shape` argument).
        If this argument is a numpy array, a PyTorch tensor or an MXNet array,
the values of `shape` and `dtype` will default to `value.shape` and `value.dtype`,
respectively.
dtype: DALIDataType, optional
Target type of the constant.
shape: list or tuple of int, optional
Requested shape of the output. If `value` is a scalar, it is broadcast
        to fill the requested shape. Otherwise, the number of elements in
`value` must match the volume of the shape.
layout: string, optional
A string describing the layout of the constant tensor, e.g. "HWC"
device: string, optional, "cpu" or "gpu"
The device to place the constant tensor in. If specified, it forces
the value to become a constant tensor node on given device,
regardless of `value` type or `shape`.
**kwargs: additional keyword arguments
If present, it forces the constant to become a Constant tensor node
and the arguments are passed to the `dali.ops.Constant` operator
"""
if (device is not None
or (_is_compatible_array_type(value) and not _is_true_scalar(value))
or isinstance(value, (list, tuple))
or not _is_scalar_shape(shape)
or kwargs
or layout is not None):
return ConstantNode(device, value, dtype, shape, layout, **kwargs)
else:
return ScalarConstant(value, dtype)
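# Usage sketch (added to this excerpt): a bare Python scalar stays a lightweight
# ScalarConstant, while an array, an explicit shape, layout or device request
# produces a constant tensor node that is only valid inside a pipeline definition.
def _example_constant():
    s = Constant(1.5)  # -> ScalarConstant(1.5, DALIDataType.FLOAT)
    assert isinstance(s, ScalarConstant)
    # These would create `fn.constant` nodes, so they belong in a pipeline body:
    #     t = Constant(np.zeros((2, 2), dtype=np.float32), layout="HW")
    #     u = Constant(0, shape=[3], device="gpu")
    return s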
class SampleInfo:
"""
Describes the indices of a sample requested from :meth:`nvidia.dali.fn.external_source`
:ivar idx_in_epoch: 0-based index of the sample within epoch
:ivar idx_in_batch: 0-based index of the sample within batch
:ivar iteration: number of current batch within epoch
:ivar epoch_idx: number of current epoch
"""
def __init__(self, idx_in_epoch, idx_in_batch, iteration, epoch_idx):
self.idx_in_epoch = idx_in_epoch
self.idx_in_batch = idx_in_batch
self.iteration = iteration
self.epoch_idx = epoch_idx
class BatchInfo:
"""
Describes the batch requested from :meth:`nvidia.dali.fn.external_source`
:ivar iteration: number of current batch within epoch
:ivar epoch_idx: number of current epoch
"""
def __init__(self, iteration, epoch_idx):
self.iteration = iteration
self.epoch_idx = epoch_idx
|
DALI-main
|
dali/python/nvidia/dali/types.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
import sys
from nvidia.dali import backend as _b
from nvidia.dali import internal as _internal
from nvidia.dali.external_source import external_source
_special_case_mapping = {
"b_box": "bbox",
"mx_net": "mxnet",
"tf_record": "tfrecord"
}
def _handle_special_case(s):
for artifact, desired in _special_case_mapping.items():
s = s.replace(artifact, desired)
return s
def _to_snake_case(pascal):
out = ""
nupper = 0
start = 0
for i, c in enumerate(pascal):
if c.isupper():
if nupper == 0:
start = i
nupper += 1
elif c.islower():
if nupper == 0:
out += c
else:
# do not add another leading underscore
if len(out) > 0 and out[-1] != '_':
out += '_'
if nupper > 1:
out += pascal[start:i - 1].lower() + '_'
out += pascal[i - 1].lower()
out += c
nupper = 0
start = i + 1
else:
out += pascal[start:i + 1].lower()
start = i + 1
nupper = 0
if nupper > 0:
if len(out) and out[-1].islower():
out += '_'
out += pascal[start:].lower()
out = _handle_special_case(out)
return out
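# Behaviour sketch (added for illustration): runs of capitals collapse into one
# snake_case word and `_handle_special_case` fixes DALI-specific spellings.
def _example_to_snake_case():
    assert _to_snake_case("CoordFlip") == "coord_flip"
    assert _to_snake_case("TFRecordReader") == "tfrecord_reader"  # tf_record -> tfrecord
    assert _to_snake_case("BBoxPaste") == "bbox_paste"            # b_box -> bbox
    return _to_snake_case("ExternalSource")                       # 'external_source'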
def _wrap_op_fn(op_class, wrapper_name, wrapper_doc):
def op_wrapper(*inputs, **kwargs):
import nvidia.dali.ops
init_args, call_args = nvidia.dali.ops._separate_kwargs(kwargs)
default_dev = nvidia.dali.ops._choose_device(inputs)
if default_dev == "gpu" and init_args.get("device") == "cpu":
raise ValueError("An operator with device='cpu' cannot accept GPU inputs.")
if "device" not in init_args:
init_args["device"] = default_dev
return op_class(**init_args)(*inputs, **call_args)
def fn_wrapper(*inputs, **kwargs):
from nvidia.dali._debug_mode import _PipelineDebug
current_pipeline = _PipelineDebug.current()
if getattr(current_pipeline, '_debug_on', False):
return current_pipeline._wrap_op_call(op_class, wrapper_name, *inputs, **kwargs)
else:
return op_wrapper(*inputs, **kwargs)
fn_wrapper.__name__ = wrapper_name
fn_wrapper.__qualname__ = wrapper_name
fn_wrapper.__doc__ = wrapper_doc
fn_wrapper._schema_name = op_class.schema_name
return fn_wrapper
def _wrap_op(op_class, submodule, parent_module, wrapper_doc):
"""Wrap the DALI Operator with fn API and insert the function into appropriate module.
Args:
op_class: Op class to wrap
submodule: Additional submodule (scope)
parent_module (str): If set to None, the wrapper is placed in nvidia.dali.fn module,
otherwise in a specified parent module.
wrapper_doc (str): Documentation of the wrapper function
"""
from nvidia.dali._utils import eager_utils
schema = _b.TryGetSchema(op_class.schema_name)
make_hidden = schema.IsDocHidden() if schema else False
wrapper_name = _to_snake_case(op_class.__name__)
# Add operator to eager API.
eager_utils._wrap_eager_op(op_class, submodule, parent_module,
wrapper_name, wrapper_doc, make_hidden)
if parent_module is None:
fn_module = sys.modules[__name__]
else:
fn_module = sys.modules[parent_module]
module = _internal.get_submodule(fn_module, submodule)
if not hasattr(module, wrapper_name):
wrap_func = _wrap_op_fn(op_class, wrapper_name, wrapper_doc)
setattr(module, wrapper_name, wrap_func)
if submodule:
wrap_func.__module__ = module.__name__
if make_hidden:
parent_module = _internal.get_submodule(fn_module, submodule[:-1])
setattr(parent_module, wrapper_name, wrap_func)
external_source.__module__ = __name__
external_source._schema_name = "ExternalSource"
|
DALI-main
|
dali/python/nvidia/dali/fn.py
|
# Copyright (c) 2017-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
from typing import List
from collections import deque
from nvidia.dali import backend as b
from nvidia.dali import types
from nvidia.dali import internal
from nvidia.dali._multiproc.pool import WorkerPool
from nvidia.dali import pickling as dali_pickle
from nvidia.dali import _conditionals
from threading import local as tls
from . import data_node as _data_node
import functools
import inspect
import warnings
import weakref
import ctypes
import sys
from .data_node import DataNode
pipeline_tls = tls()
DataNode.__module__ = __name__ # move to pipeline
def _show_deprecation_warning(deprecated, in_favor_of):
# show only this warning
with warnings.catch_warnings():
warnings.simplefilter("default")
warnings.warn("{} is deprecated, please use {} instead".format(deprecated, in_favor_of),
Warning, stacklevel=2)
class Pipeline(object):
"""Pipeline class is the base of all DALI data pipelines. The pipeline
encapsulates the data processing graph and the execution engine.
Parameters
----------
`batch_size` : int, optional, default = -1
Maximum batch size of the pipeline. Negative values for this parameter
are invalid - the default value may only be used with
serialized pipeline (the value stored in serialized pipeline
is used instead). In most cases, the actual batch size of the pipeline
will be equal to the maximum one. Running the DALI Pipeline with a smaller batch size
is also supported. The batch size might change from iteration to iteration.
        Please note that DALI might perform memory preallocations according to this
parameter. Setting it too high might result in out-of-memory failure.
`num_threads` : int, optional, default = -1
Number of CPU threads used by the pipeline.
Negative values for this parameter are invalid - the default
value may only be used with serialized pipeline (the value
stored in serialized pipeline is used instead).
`device_id` : int, optional, default = -1
Id of GPU used by the pipeline.
        A None value for this parameter means that DALI should use neither the GPU nor the
        CUDA runtime. This limits the pipeline to only CPU operators but allows it to run
        on any CPU-capable machine.
`seed` : int, optional, default = -1
Seed used for random number generation. Leaving the default value
for this parameter results in random seed.
`exec_pipelined` : bool, optional, default = True
Whether to execute the pipeline in a way that enables
overlapping CPU and GPU computation, typically resulting
in faster execution speed, but larger memory consumption.
`prefetch_queue_depth` : int or {"cpu_size": int, "gpu_size": int}, optional, default = 2
Depth of the executor pipeline. Deeper pipeline makes DALI
more resistant to uneven execution time of each batch, but it
also consumes more memory for internal buffers.
Specifying a dict:
``{ "cpu_size": x, "gpu_size": y }``
instead of an integer will cause the pipeline to use separated
queues executor, with buffer queue size `x` for cpu stage
and `y` for mixed and gpu stages. It is not supported when both `exec_async`
and `exec_pipelined` are set to `False`.
        The executor will buffer cpu and gpu stages separately,
and will fill the buffer queues when the first :meth:`run`
is issued.
`exec_async` : bool, optional, default = True
Whether to execute the pipeline asynchronously.
This makes :meth:`run` method
run asynchronously with respect to the calling Python thread.
In order to synchronize with the pipeline one needs to call
:meth:`outputs` method.
`bytes_per_sample` : int, optional, default = 0
A hint for DALI for how much memory to use for its tensors.
`set_affinity` : bool, optional, default = False
Whether to set CPU core affinity to the one closest to the
GPU being used.
`max_streams` : int, optional, default = -1
Limit the number of CUDA streams used by the executor.
Value of -1 does not impose a limit.
This parameter is currently unused (and behavior of
unrestricted number of streams is assumed).
`default_cuda_stream_priority` : int, optional, default = 0
CUDA stream priority used by DALI. See `cudaStreamCreateWithPriority` in CUDA documentation
    `enable_memory_stats`: bool, optional, default = False
        If DALI should print operator output buffer statistics.
        Useful for the `bytes_per_sample_hint` operator parameter.
`py_num_workers`: int, optional, default = 1
The number of Python workers that will process ``ExternalSource`` callbacks.
The pool starts only if there is at least one ExternalSource with ``parallel`` set to True.
Setting it to 0 disables the pool and all ExternalSource operators fall back to non-parallel
mode even if ``parallel`` is set to True.
`py_start_method` : str, default = "fork"
Determines how Python workers are started. Supported methods:
* ``"fork"`` - start by forking the process
* ``"spawn"`` - start a fresh interpreter process
If ``spawn`` method is used, ExternalSource's callback must be picklable.
In order to use ``fork``, there must be no CUDA contexts acquired at the moment of starting
the workers. For this reason, if you need to build multiple pipelines that use Python workers,
you will need to call :meth:`start_py_workers` before calling :meth:`build` of any
of the pipelines. You can find more details and caveats of both methods in Python's
``multiprocessing`` module documentation.
`py_callback_pickler` : module or tuple, default = None
If `py_start_method` is set to *spawn*, callback passed to parallel
ExternalSource must be picklable.
        If run with Python 3.8 or newer and `py_callback_pickler` set to None, DALI uses customized pickle
when serializing callbacks to support serialization of local functions and lambdas.
However, if you need to serialize more complex objects like local classes or you are running
        an older version of Python, you can provide an external serialization package such as dill
        or cloudpickle that implements two methods: `dumps` and `loads`, to make DALI use them to
        serialize external source callbacks. You can pass a module directly as ``py_callback_pickler``::
import dill
@pipeline_def(py_callback_pickler=dill, ...)
def create_pipeline():
src = fn.external_source(lambda sample_info: np.int32([42]), batch=False, parallel=True)
...
A valid value for `py_callback_pickler` is either a module/object implementing
``dumps`` and ``loads`` methods or a tuple where the first item is the module/object and
the next two optional parameters are extra kwargs to be passed when calling dumps and
loads respectively.
The provided methods and kwargs must be picklable with standard `pickle.dumps`.
        If you run Python 3.8 or newer with the default DALI pickler (`py_callback_pickler` = None),
you can hint DALI to serialize global functions by value rather than by reference
by decorating them with `@dali.pickling.pickle_by_value`. It may be especially useful when
working with Jupyter notebook to work around the issue of worker process being unable to import
the callback defined as a global function inside the notebook.
`output_dtype` : ``nvidia.dali.types.DALIDataType`` or list of those, default = None
        With this argument, you may declare what data type you expect in the given output.
        You shall pass a list of ``types.DALIDataType``, each element in the list
        corresponding to one output from the pipeline. Additionally, you can pass ``None``
        as a wildcard. The outputs,
after each iteration, will be validated against the types you passed to this argument. If any
output does not match the provided type, RuntimeError will be raised.
If the ``output_dtype`` value is a single value (not a list), it will be broadcast to the
number of outputs from the pipeline.
`output_ndim` : int or list of ints, default = None
        With this argument, you may declare how many dimensions you expect in the given output.
You shall pass a list of integers, each element in the list corresponding to one output
from the pipeline.
Additionally, you can pass ``None`` as a wildcard. The outputs, after each iteration, will be
validated against the numbers of dimensions you passed to this argument. If the dimensionality
of any output does not match the provided ``ndim``, RuntimeError will be raised.
If the ``output_ndim`` value is a single value (not a list), it will be broadcast to the
number of outputs from the pipeline.
"""
def __init__(self,
batch_size=-1,
num_threads=-1,
device_id=-1,
seed=-1,
exec_pipelined=True,
prefetch_queue_depth=2,
exec_async=True,
bytes_per_sample=0,
set_affinity=False,
max_streams=-1,
default_cuda_stream_priority=0,
*,
enable_memory_stats=False,
py_num_workers=1,
py_start_method="fork",
py_callback_pickler=None,
output_dtype=None,
output_ndim=None):
self._sinks = []
self._max_batch_size = batch_size
self._num_threads = num_threads
self._device_id = device_id
self._seed = seed
self._exec_pipelined = exec_pipelined
# When initializing DALI, we do the following in order:
# * Discover the ops specified in Python, group the ExternalSources (_build_graph())
# * Start the Python workers pool (_start_py_workers())
# * Construct the C++ Pipeline backend and pass the graph to it (_init_pipeline_backend())
        # * Build the pipeline. (_pipe.Build())
# In case of deserialized pipeline, only _backend_prepared and _built will be True
self._py_graph_built = False
self._py_pool_started = False
self._backend_prepared = False
self._built = False
self._deserialized = False # Marked True when deserializing
self._first_iter = True
self._last_iter = False
self._iter = 0
self._epoch_idx = 0
self._batches_to_consume = 0
self._cpu_batches_to_consume = 0
self._gpu_batches_to_consume = 0
self._names_and_devices = None
self._exec_async = exec_async
self._bytes_per_sample = bytes_per_sample
self._set_affinity = set_affinity
self._max_streams = max_streams
self._default_cuda_stream_priority = default_cuda_stream_priority
self._py_num_workers = py_num_workers
self._py_start_method = py_start_method
if py_callback_pickler is not None and py_start_method == "fork":
raise ValueError(
"``py_callback_pickler`` should not be set when 'fork' start method is used.")
if py_callback_pickler is None and py_start_method == "spawn":
py_callback_pickler = dali_pickle._DaliPickle
self._py_callback_pickler = py_callback_pickler
self._api_type = None
self._skip_api_check = False
self._graph_out = None
self._ops = None
self._graph_outputs = None
self._py_pool = None
self._input_callbacks = None
self._parallel_input_callbacks = None
self._seq_input_callbacks = None
self._enable_memory_stats = enable_memory_stats
self._prefetch_queue_depth = prefetch_queue_depth
if type(prefetch_queue_depth) is dict:
self._exec_separated = True
self._cpu_queue_size = prefetch_queue_depth["cpu_size"]
self._gpu_queue_size = prefetch_queue_depth["gpu_size"]
elif type(prefetch_queue_depth) is int:
self._exec_separated = False
self._cpu_queue_size = prefetch_queue_depth
self._gpu_queue_size = prefetch_queue_depth
else:
raise TypeError("Expected prefetch_queue_depth to be either int or Dict[int, int]")
self._conditionals_enabled = False
self._condition_stack = None
# Assign and validate output_dtype
if isinstance(output_dtype, (list, tuple)):
for dtype in output_dtype:
if not isinstance(dtype, (types.DALIDataType, type(None))):
raise TypeError(
f"`output_dtype` must be either: a value from "
f"nvidia.dali.types.DALIDataType, a list of these or None. "
f"Found type {type(dtype)} in the list."
)
if dtype == types.NO_TYPE:
raise ValueError(
f"`output_dtype` can't be a types.NO_TYPE. Found {dtype} in the list.")
elif not isinstance(output_dtype, (types.DALIDataType, type(None))):
raise TypeError(
f"`output_dtype` must be either: a value from nvidia.dali.types.DALIDataType, a "
f"list of these or None. Found type: {type(output_dtype)}."
)
elif output_dtype == types.NO_TYPE:
raise ValueError(
f"`output_dtype` can't be a types.NO_TYPE. Found value: {output_dtype}")
self._output_dtype = output_dtype
# Assign and validate output_ndim
if isinstance(output_ndim, (list, tuple)):
for ndim in output_ndim:
if not isinstance(ndim, (int, type(None))):
raise TypeError(
f"`output_ndim` must be either: an int, a list of ints or None. "
f"Found type {type(ndim)} in the list."
)
if ndim is not None and ndim < 0:
raise ValueError(
f"`output_ndim` must be non-negative. Found value {ndim} in the list.")
elif not isinstance(output_ndim, (int, type(None))):
raise TypeError(
f"`output_ndim` must be either: an int, a list of ints or None. "
f"Found type: {type(output_ndim)}."
)
elif output_ndim is not None and output_ndim < 0:
raise ValueError(f"`output_ndim` must be non-negative. Found value: {output_ndim}.")
self._output_ndim = output_ndim
@property
def batch_size(self):
"""Batch size."""
_show_deprecation_warning("batch_size", "max_batch_size")
return self._max_batch_size
@property
def max_batch_size(self):
"""Maximum batch size."""
return self._max_batch_size
@property
def num_threads(self):
"""Number of CPU threads used by this pipeline."""
return self._num_threads
@property
def device_id(self):
"""Id of the GPU used by the pipeline or None for CPU-only pipelines."""
return None if self._device_id == types.CPU_ONLY_DEVICE_ID else self._device_id
@property
def seed(self):
"""Random seed used in the pipeline or None, if seed is not fixed."""
return self._seed
@property
def exec_pipelined(self):
"""If true, pipeline execution model is used."""
return self._exec_pipelined
@property
def exec_async(self):
"""If true, asynchronous execution is used."""
return self._exec_async
@property
def set_affinity(self):
"""If True, worker threads are bound to CPU cores."""
return self._set_affinity
@property
def max_streams(self):
"""Reserved for future use."""
return self._max_streams
@property
def prefetch_queue_depth(self):
"""Depth (or depths) of the prefetch queue, as specified in the ``__init__`` arguments."""
return self._prefetch_queue_depth
@property
def default_cuda_stream_priority(self):
"""Default priority of the CUDA streams used by this pipeline."""
return self._default_cuda_stream_priority
@property
def enable_memory_stats(self):
"""If True, memory usage statistics are gathered."""
return self._enable_memory_stats
@property
def py_num_workers(self):
"""The number of Python worker processes used by parallel ```external_source```."""
return self._py_num_workers
@property
def py_start_method(self):
"""The method of launching Python worker processes used by
parallel ```external_source```."""
return self._py_start_method
@property
def exec_separated(self):
"""If True, there are separate prefetch queues for CPU and GPU stages."""
return self._exec_separated
@property
def cpu_queue_size(self):
"""The number of iterations processed ahead by the CPU stage."""
return self._cpu_queue_size
@property
def gpu_queue_size(self):
"""The number of iterations processed ahead by the GPU stage."""
return self._gpu_queue_size
def output_dtype(self) -> list:
"""Data types expected at the outputs."""
return [elem if elem != types.NO_TYPE else None for elem in self._pipe.output_dtype()]
def output_ndim(self) -> list:
"""Number of dimensions expected at the outputs."""
return [elem if elem != -1 else None for elem in self._pipe.output_ndim()]
def epoch_size(self, name=None):
"""Epoch size of a pipeline.
If the `name` parameter is `None`, returns a dictionary of pairs
`(reader name, epoch size for that reader)`.
If the `name` parameter is not `None`, returns epoch size for that
reader.
Parameters
----------
name : str, optional, default = None
The reader which should be used to obtain epoch size.
"""
if not self._built:
raise RuntimeError("Pipeline must be built first.")
if name is not None:
return self._pipe.reader_meta(name)["epoch_size_padded"]
meta = self._pipe.reader_meta()
return {k: v["epoch_size_padded"] for k, v in meta.items()}
def executor_statistics(self):
"""Returns provided pipeline executor statistics metadata as a dictionary.
Each key in the dictionary is the operator name. To enable it use ``executor_statistics``
Available metadata keys for each operator:
        * ``real_memory_size`` - list of memory sizes used by each output of
          the operator. The index in the list corresponds to the output index.
        * ``max_real_memory_size`` - list of maximum tensor sizes used by each output of
          the operator. The index in the list corresponds to the output index.
        * ``reserved_memory_size`` - list of memory sizes reserved for each of
          the operator outputs. The index in the list corresponds to the output index.
        * ``max_reserved_memory_size`` - list of maximum memory sizes per tensor
          reserved for each of the operator outputs. The index in the list corresponds
          to the output index.
"""
if not self._built:
raise RuntimeError("Pipeline must be built first.")
return self._pipe.executor_statistics()
def external_source_shm_statistics(self):
"""Returns parallel external source's statistics regarding shared memory consumption.
        The returned dictionary contains the following keys:
* ``capacities`` - a list of sizes (in bytes) of shared memory slots allocated to
accommodate data produced by the parallel external source.
* ``per_sample_capacities`` - a list of sizes (in bytes) of shared memory slots
divided by the mini-batch size, i.e. the maximal number of samples stored in
such a slot. This value corresponds to external source's ``bytes_per_sample_hint``
parameter, i.e., if the hint is big enough and the external source does not need
to reallocate the memory, the values should be equal.
"""
if self._py_pool is None:
capacities, per_sample_capacities = [], []
else:
capacities = [
shm.capacity for context in self._py_pool.contexts
for shm in context.shm_manager.shm_pool
]
per_sample_capacities = []
for context in self._py_pool.contexts:
num_mini_batches = context.shm_manager.num_minibatches
batch_size = self.max_batch_size
mini_batch_size = (batch_size + num_mini_batches - 1) // num_mini_batches
for shm in context.shm_manager.shm_pool:
per_sample_capacities.append(shm.capacity // mini_batch_size)
return {
"capacities": capacities,
"per_sample_capacities": per_sample_capacities,
}
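    # A minimal usage sketch (assumes a built pipeline `pipe` that uses parallel
    # external sources; the variable names are illustrative only):
    #
    #   stats = pipe.external_source_shm_statistics()
    #   total_shm_bytes = sum(stats["capacities"])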
def reader_meta(self, name=None):
"""Returns provided reader metadata as a dictionary. If no name is provided if provides
a dictionary with data for all readers as {reader_name : meta}
Available metadata keys:
``epoch_size``: raw epoch size
``epoch_size_padded``: epoch size with the padding at the end to be divisible by
the number of shards
``number_of_shards``: number of shards
``shard_id``: shard id of given reader
``pad_last_batch``: if given reader should pad last batch
``stick_to_shard``: if given reader should stick to its shard
Parameters
----------
name : str, optional, default = None
            The reader whose metadata should be obtained.
"""
if not self._built:
raise RuntimeError("Pipeline must be built first.")
if name is not None:
return self._pipe.reader_meta(name)
return self._pipe.reader_meta()
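    # A minimal usage sketch (assumes a built pipeline `pipe` with a reader named
    # "Reader" - the name is hypothetical):
    #
    #   meta = pipe.reader_meta("Reader")
    #   shards, shard_id = meta["number_of_shards"], meta["shard_id"]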
@staticmethod
def current():
"""Returns the instance of the current pipeline set by :meth:`push_current`."""
return getattr(pipeline_tls, 'current_pipeline', None)
@staticmethod
def _raise_pipeline_required(op_name):
raise RuntimeError(
"Current Pipeline not set!\n" + op_name
+ " operator must be used inside `define_graph` or "
"current pipeline must be explicitly set using context manager (`with my_pipeline:`) "
"or with a call to `Pipeline.push_current(my_pipeline)`.")
@staticmethod
def push_current(pipeline):
"""Sets the pipeline as current and stores the previous current pipeline
on stack. To restore previous pipeline as current, use :meth:`pop_current`.
To make sure that the pipeline is properly restored in case of exception, use context
manager (`with my_pipeline:`).
Current pipeline is required to call operators with side effects or without outputs.
Examples of such operators are `PythonFunction` (potential side effects) or `DumpImage`
(no output).
        Any dangling operator can be marked as having side effects by setting
        `preserve=True`, which can be useful for debugging - otherwise, an operator that
        does not contribute to the pipeline output is removed from the graph.
"""
prev = Pipeline.current()
pipeline_tls.current_pipeline = pipeline
stack = getattr(pipeline_tls, 'pipeline_stack', None)
if stack is None:
pipeline_tls.pipeline_stack = [prev]
else:
stack.append(prev)
return prev
@staticmethod
def pop_current():
"""Restores previous pipeline as current. Complementary to :meth:`push_current`."""
pipeline_tls.current_pipeline = pipeline_tls.pipeline_stack.pop()
def __enter__(self):
"""Safely sets the pipeline as current.
Current pipeline is required to call operators with side effects or without outputs.
Examples of such operators are `PythonFunction` (potential side effects) or `DumpImage`
(no output).
        Any dangling operator can be marked as having side effects by setting
        `preserve=True`, which can be useful for debugging - otherwise, an operator that
        does not contribute to the pipeline output is removed from the graph.
To manually set new (and restore previous) current pipeline, use :meth:`push_current`
and :meth:`pop_current`, respectively.
"""
Pipeline.push_current(self)
return self
def __exit__(self, exception_type, exception_value, traceback):
"""Safely restores previous pipeline."""
Pipeline.pop_current()
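    # A sketch of the two equivalent ways of setting the current pipeline
    # (`my_pipeline` stands for any Pipeline instance):
    #
    #   with my_pipeline:
    #       ...  # graph definition that needs a current pipeline
    #
    #   prev = Pipeline.push_current(my_pipeline)
    #   try:
    #       ...  # same effect, but exception safety is handled manually
    #   finally:
    #       Pipeline.pop_current()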
def add_sink(self, edge):
"""Marks an edge as a data sink, preventing it from being pruned, even if it's not
connected to the pipeline output.
"""
self._sinks.append(edge)
def _set_api_type(self, type):
if type not in types.PipelineAPIType:
raise RuntimeError(
"Wrong pipeline API set!"
"check available values in :meth:`nvidia.dali.types.PipelineAPIType`")
self._api_type = type
def _check_api_type(self, type):
if self._api_type is None:
self._set_api_type(type)
if type != self._api_type:
raise RuntimeError(f"Mixing pipeline API type. Currently used: {self._api_type}, "
f"but trying to use {str(type)}")
def enable_api_check(self, enable):
"""Allows to enable or disable API check in the runtime
"""
self._skip_api_check = not enable
def _check_api_type_scope(self, type):
"""Checks the API currently used by pipeline and throws an error if it differs
It helps preventing of mixing simple, iterator and scheduled based API for
pipeline run. Disables further checks in its scope
"""
if not self._skip_api_check:
self._check_api_type(type)
class api_checker():
def __init__(self, pipe):
self._pipe = pipe
def __enter__(self):
self._old_skip_api_check = self._pipe._skip_api_check
self._pipe._skip_api_check = True
def __exit__(self, type, value, traceback):
self._pipe._skip_api_check = self._old_skip_api_check
return api_checker(self)
# Graph is constructed by backtracking from the output edges and the edges marked as sinks
def _build_graph(self, define_graph=None):
if define_graph is not None:
if self._graph_out is not None:
raise RuntimeError(
"Duplicate graph definition - `define_graph` argument "
"should not be specified when graph was defined with a call to `set_outputs`.")
else:
define_graph = self.define_graph
if self._graph_out:
outputs = self._graph_out
else:
with self:
outputs = define_graph()
if isinstance(outputs, tuple):
outputs = list(outputs)
elif not isinstance(outputs, list):
outputs = [outputs]
def is_nested(container, pred, container_types):
if isinstance(container, container_types):
if any(pred(x) for x in container):
return True
for x in container:
if is_nested(x, pred, container_types):
return True
return False
def contains_nested_datanode(nested):
return is_nested(nested, lambda x: isinstance(x, DataNode), (list, tuple))
for i in range(len(outputs)):
if isinstance(outputs[i], types.ScalarConstant):
import nvidia.dali.ops
outputs[i] = nvidia.dali.ops._instantiate_constant_node(outputs[i], "cpu")
elif contains_nested_datanode(outputs[i]):
raise TypeError(f"Illegal pipeline output type. The output {i} contains a nested "
"`DataNode`. Missing list/tuple expansion (*) is the likely cause.")
elif not isinstance(outputs[i], DataNode):
try:
outputs[i] = types.Constant(outputs[i], device="cpu")
except TypeError:
raise TypeError(
f"Illegal output type. The output {i} is a `{type(outputs[i])}`. "
f"Allowed types are ``DataNode`` and types convertible to "
f"`types.Constant` (numerical constants, 1D lists/tuple of numbers "
f"and ND arrays).")
_data_node._check(outputs[i])
self._ops = _collect_ops(list(outputs) + self._sinks)
self._graph_outputs = outputs
self._setup_input_callbacks()
self._disable_pruned_external_source_instances()
self._py_graph_built = True
def _setup_pipe_pool_dependency(self):
if self._py_pool_started:
            # The sole point of this call is to ensure that the lifetime of the pool exceeds
            # the lifetime of the pipeline's backend, so that the shared memory managed by
            # the pool is not freed before the pipeline's backend is garbage collected.
            # Otherwise, the backend may try to access unmapped memory, which leads to
            # crashes at Python teardown.
self._pipe.SetPyObjDependency(self._py_pool)
def _start_py_workers(self):
if not self._parallel_input_callbacks:
return
self._py_pool = WorkerPool.from_groups(self._parallel_input_callbacks,
self._prefetch_queue_depth,
self._max_batch_size,
self._py_start_method,
self._py_num_workers,
py_callback_pickler=self._py_callback_pickler)
        # ensure processes started by the pool are terminated when the pipeline is no longer used
weakref.finalize(self, lambda pool: pool.close(), self._py_pool)
self._py_pool_started = True
def _init_pipeline_backend(self):
device_id = self._device_id if self._device_id is not None else types.CPU_ONLY_DEVICE_ID
if device_id != types.CPU_ONLY_DEVICE_ID:
b.check_cuda_runtime()
self._pipe = b.Pipeline(self._max_batch_size,
self._num_threads,
device_id,
self._seed if self._seed is not None else -1,
self._exec_pipelined,
self._cpu_queue_size,
self._exec_async,
self._bytes_per_sample,
self._set_affinity,
self._max_streams,
self._default_cuda_stream_priority)
self._pipe.SetExecutionTypes(self._exec_pipelined, self._exec_separated, self._exec_async)
self._pipe.SetQueueSizes(self._cpu_queue_size, self._gpu_queue_size)
self._pipe.EnableExecutorMemoryStats(self._enable_memory_stats)
# Add the ops to the graph and build the backend
related_logical_id = {}
for op in self._ops:
if op.relation_id not in related_logical_id:
related_logical_id[op.relation_id] = self._pipe.AddOperator(op.spec, op.name)
else:
self._pipe.AddOperator(op.spec, op.name, related_logical_id[op.relation_id])
self._backend_prepared = True
self._names_and_devices = [(e.name, e.device) for e in self._graph_outputs]
def _disable_pruned_external_source_instances(self):
def truncate_str(obj, max_len=103):
obj_str = str(obj)
if len(obj_str) <= max_len:
return obj_str
return obj_str[:max_len - 3] + "..."
graph_op_ids = set(op.id for op in self._ops)
for group in self._input_callbacks:
pruned_mask = [op.id not in graph_op_ids for op in group.instances]
if any(pruned_mask):
group.disable_pruned_instances(pruned_mask)
pruned_idx = [i for i, is_pruned in enumerate(pruned_mask) if is_pruned]
source_str = truncate_str(group.source_desc.source)
num_outputs = len(group.instances)
pruned_idx_str = ", ".join(str(idx) for idx in pruned_idx)
if len(pruned_idx) > 1:
pruned_str = f"outputs at the indices {pruned_idx_str} are"
else:
pruned_str = f"output at the index {pruned_idx_str} is"
warnings.warn(
f"The external source node '{source_str}' produces {num_outputs} outputs, "
f"but the {pruned_str} not used. For best performance, adjust your "
f"callback so that it computes only the needed outputs.",
Warning
)
def _setup_input_callbacks(self):
from nvidia.dali.external_source import _is_external_source_with_callback
groups = set()
for op in self._ops:
if _is_external_source_with_callback(op):
group = op._group
groups.add(group)
groups = list(groups)
self._input_callbacks = groups
if self._py_num_workers == 0:
self._parallel_input_callbacks = []
self._seq_input_callbacks = self._input_callbacks
else:
parallel = [group for group in groups if group.parallel]
dedicated_worker_cbs = [
group for group in parallel if WorkerPool.is_iterable_group(group)
]
general_cbs = [group for group in parallel if not WorkerPool.is_iterable_group(group)]
            # make the callbacks that need a dedicated worker first in line for prefetching,
            # so that
# the worker doesn't get busy with other tasks when dedicated tasks arrive
self._parallel_input_callbacks = dedicated_worker_cbs + general_cbs
self._seq_input_callbacks = [group for group in groups if not group.parallel]
def start_py_workers(self):
"""
Start Python workers (that will run ``ExternalSource`` callbacks).
You need to call :meth:`start_py_workers` before you call any functionality that creates
or acquires CUDA context when using ``fork`` to start Python
workers (``py_start_method="fork"``). It is called automatically by
:meth:`Pipeline.build` method when such separation is not necessary.
If you are going to build more than one pipeline that starts Python workers by forking
the process then you need to call :meth:`start_py_workers` method on all those pipelines
before calling :meth:`build` method of any pipeline, as build acquires CUDA context
for current process.
The same applies to using any other functionality that would create CUDA context -
for example, initializing a framework that uses CUDA or creating CUDA tensors with it.
You need to call :meth:`start_py_workers` before you call such functionality when
using ``py_start_method="fork"``.
Forking a process that has a CUDA context is unsupported and may lead to unexpected errors.
        If you use this method, you cannot specify the ``define_graph`` argument
        when calling :meth:`build`.
"""
if not self._py_graph_built:
self._build_graph()
if not self._py_pool_started:
self._start_py_workers()
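    # A minimal ordering sketch for ``py_start_method="fork"`` (``make_pipe`` is a
    # hypothetical factory producing pipelines with forking Python workers):
    #
    #   pipes = [make_pipe(i) for i in range(2)]
    #   for p in pipes:
    #       p.start_py_workers()  # fork all workers before any CUDA context exists
    #   for p in pipes:
    #       p.build()             # build() may now safely acquire the CUDA context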
def build(self):
"""Build the pipeline.
Pipeline needs to be built in order to run it standalone.
Framework-specific plugins handle this step automatically.
"""
if self._built:
return
if self.num_threads < 1:
raise ValueError("Pipeline created with `num_threads` < 1 can only be used "
"for serialization.")
self.start_py_workers()
if not self._backend_prepared:
self._init_pipeline_backend()
self._setup_pipe_pool_dependency()
self._pipe.Build(self._generate_build_args())
self._built = True
def _feed_input(self, name, data, layout=None, cuda_stream=None, use_copy_kernel=False):
from nvidia.dali.external_source import _prep_data_for_feed_input
if cuda_stream is None:
cuda_stream = types._get_default_stream_for_array(data)
if cuda_stream == -1:
cuda_stream = None
else:
cuda_stream = types._raw_cuda_stream(cuda_stream)
data = _prep_data_for_feed_input(data, self._max_batch_size, layout, self._device_id)
if isinstance(data, list):
self._pipe.SetExternalTensorInput(name, data, ctypes.c_void_p(cuda_stream),
use_copy_kernel)
else:
self._pipe.SetExternalTLInput(name, data, ctypes.c_void_p(cuda_stream), use_copy_kernel)
def feed_input(self, data_node, data, layout=None, cuda_stream=None, use_copy_kernel=False):
"""Pass a multidimensional array or DLPack (or a list thereof) to an eligible operator.
The operators that may be provided with data using this function are the input operators
(i.e. everything in ``fn.inputs`` module) and the :meth:`fn.external_source`.
In the case of the GPU input, the data must be modified on the same stream as the one
used by ``feed_input``. See ``cuda_stream`` parameter for details.
In order to avoid stalls, the data should be provided ahead of time `prefetch_queue_depth`
times.
Parameters
----------
data_node : :class:`DataNode` or a string
The name of an eligible operator node or a :class:`DataNode`
object returned by a call to that operator.
data : ndarray or DLPack or a list thereof
The array(s) may be one of:
* NumPy ndarray (CPU)
* MXNet ndarray (CPU)
* PyTorch tensor (CPU or GPU)
* CuPy array (GPU)
* objects implementing ``__cuda_array_interface__``
* DALI ``TensorList`` or list of DALI ``Tensor`` objects
The data to be used as the output of the operator referred to by ``data_node``.
layout : string or ``None``
The description of the data layout (or empty string, if not specified).
It should be a string of the length that matches the dimensionality of the data, batch
dimension excluded. For a batch of channel-first images, this should be ``"CHW"``, for
channel-last video it's ``"FHWC"`` and so on.
If ``data`` is a DALI ``TensorList`` or a list of DALI ``Tensor`` objects and ``layout``
is ``None``, the layout is taken from ``data``.
The layout of the data must be the same in each iteration.
cuda_stream : optional, ``cudaStream_t`` or an object convertible to ``cudaStream_t``,
e.g. ``cupy.cuda.Stream``, ``torch.cuda.Stream``
The CUDA stream, which is going to be used for copying data to GPU or from a GPU
            source. If not set, a best effort will be made to maintain correctness - i.e. if the
            data
is provided as a tensor/array from a recognized library (CuPy, PyTorch), the library's
current stream is used. This should work in typical scenarios, but advanced use cases
(and code using unsupported libraries) may still need to supply the stream handle
explicitly.
Special values:
* 0 - use default CUDA stream
* -1 - use DALI's internal stream
If internal stream is used, the call to ``feed_input`` will block until the copy to
internal buffer is complete, since there's no way to synchronize with this stream to
prevent overwriting the array with new data in another stream.
use_copy_kernel : optional, ``bool``
If set to True, DALI will use a CUDA kernel to feed the data (only applicable
when copying data to/from GPU memory) instead of ``cudaMemcpyAsync`` (default).
"""
if not self._built:
raise RuntimeError("Pipeline must be built first.")
if isinstance(data_node, str):
name = data_node
else:
_data_node._check(data_node)
name = data_node.name
# Check for use of feed_input on an external_source operator that was
# initialized with 'source'. This check makes sense only for fully Python-based
# pipelines, and not deserialized ones.
from .external_source import _is_external_source
if not self._deserialized:
if next(
(_is_external_source(op) and op._callback is not None
for op in self._ops if op.name == name),
False):
raise RuntimeError(
f"Cannot use `feed_input` on the external source '{name}' with a `source`"
" argument specified.")
self._feed_input(name, data, layout, cuda_stream, use_copy_kernel)
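    # A minimal usage sketch (assumes a built pipeline `pipe` containing
    # ``fn.external_source(name="my_inp")``; the node name is hypothetical):
    #
    #   import numpy as np
    #   batch = [np.zeros((480, 640, 3), dtype=np.uint8)
    #            for _ in range(pipe.max_batch_size)]
    #   pipe.feed_input("my_inp", batch, layout="HWC")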
def _run_cpu(self):
"""Run CPU portion of the pipeline."""
if not self._built:
raise RuntimeError("Pipeline must be built first.")
if not self._last_iter:
self._pipe.RunCPU()
self._cpu_batches_to_consume += 1
def _run_gpu(self):
"""Run GPU portion of the pipeline."""
if not self._built:
raise RuntimeError("Pipeline must be built first.")
if self._cpu_batches_to_consume > 0:
self._pipe.RunGPU()
self._cpu_batches_to_consume -= 1
self._gpu_batches_to_consume += 1
def outputs(self):
"""Returns the outputs of the pipeline and releases previous buffer.
If the pipeline is executed asynchronously, this function blocks
until the results become available. It rises StopIteration if data set
reached its end - usually when iter_setup cannot produce any more data.
:return:
A list of `TensorList` objects for respective pipeline outputs
"""
with self._check_api_type_scope(types.PipelineAPIType.SCHEDULED):
if self._batches_to_consume == 0 or self._gpu_batches_to_consume == 0:
raise StopIteration
self._batches_to_consume -= 1
self._gpu_batches_to_consume -= 1
return self._outputs()
def schedule_run(self):
"""Run the pipeline without returning the resulting buffers.
If the pipeline was created with `exec_pipelined` option set to `True`,
this function will also start prefetching the next iteration for
        faster execution. It gives the user better control over when to run
        the pipeline, when to obtain the resulting buffers, and when to return them
        to the DALI buffer pool once the results have been consumed.
Needs to be used together with :meth:`release_outputs`
and :meth:`share_outputs`.
Should not be mixed with :meth:`run` in the same pipeline"""
with self._check_api_type_scope(types.PipelineAPIType.SCHEDULED):
if self._first_iter and self._exec_pipelined:
self._prefetch()
else:
self._run_once()
# for the backward compatibility
def _run(self):
"""Deprecated. Use `schedule_run` instead."""
_show_deprecation_warning("_run", "schedule_run")
self.schedule_run()
def share_outputs(self):
"""Returns the outputs of the pipeline.
Main difference to :meth:`outputs`
is that share_outputs doesn't release returned buffers, release_outputs
need to be called for that. If the pipeline is executed asynchronously,
        this function blocks until the results become available. It gives the user
        better control over when to run the pipeline, when to obtain the resulting
        buffers, and when the buffers can be returned to the DALI pool once the
        results have been consumed.
Needs to be used together with :meth:`release_outputs`
and :meth:`schedule_run`
Should not be mixed with :meth:`run` in the same pipeline.
:return:
A list of `TensorList` objects for respective pipeline outputs
"""
with self._check_api_type_scope(types.PipelineAPIType.SCHEDULED):
if self._batches_to_consume == 0 or self._gpu_batches_to_consume == 0:
raise StopIteration
self._batches_to_consume -= 1
self._gpu_batches_to_consume -= 1
return self._pipe.ShareOutputs()
# for the backward compatibility
def _share_outputs(self):
"""Deprecated. Use :meth:`share_outputs` instead"""
_show_deprecation_warning("_share_outputs", "share_outputs")
self.share_outputs()
def release_outputs(self):
"""Release buffers returned by share_outputs calls.
        It helps when the results of an output call have been consumed (copied)
        and the buffers can be marked as free before the next call to share_outputs. It gives
        the user better control over when to run the pipeline, when to obtain the resulting
        buffers, and when the buffers can be returned to the DALI pool once the
        results have been consumed.
Needs to be used together with :meth:`schedule_run`
and :meth:`share_outputs`
Should not be mixed with :meth:`run` in the same pipeline"""
with self._check_api_type_scope(types.PipelineAPIType.SCHEDULED):
if not self._built:
raise RuntimeError("Pipeline must be built first.")
return self._pipe.ReleaseOutputs()
# for the backward compatibility
def _release_outputs(self):
"""Deprecated. Use :meth:`release_outputs` instead"""
_show_deprecation_warning("_release_outputs", "release_outputs")
self.release_outputs()
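    # A minimal sketch of the scheduled (non-blocking) API cycle described above
    # (assumes a built pipeline `pipe`):
    #
    #   pipe.schedule_run()          # start an iteration (and prefetching)
    #   outs = pipe.share_outputs()  # blocks until the results are available
    #   ...                          # consume or copy `outs` here
    #   pipe.release_outputs()       # return the buffers to DALI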
def _outputs(self):
"""Release buffers previously returned and returns the calls.
Calling this function is equivalent to calling release_outputs
then calling share_outputs"""
if not self._built:
raise RuntimeError("Pipeline must be built first.")
return self._pipe.Outputs()
def _are_pipeline_inputs_possible(self):
"""
        Returns True if using the pipeline_inputs argument in the .run() function is possible.
"""
if not self.exec_pipelined:
return True
if self.exec_separated:
return self._cpu_queue_size <= 1 and self._gpu_queue_size <= 1
return self.prefetch_queue_depth <= 1
def run(self, **pipeline_inputs):
"""
Run the pipeline and return the result.
If the pipeline was created with `exec_pipelined` option set to `True`,
this function will also start prefetching the next iteration for
faster execution.
Should not be mixed with :meth:`schedule_run` in the same pipeline,
:meth:`share_outputs` and
:meth:`release_outputs`
Parameters
----------
pipeline_inputs :
Optional argument that can be used to provide inputs to DALI.
When DALI has any input operators defined (e.g. fn.external_source), you can provide the
inputs to those using named arguments in this function. The assumption is that
DALI pipeline has them defined and named properly::
@pipeline_def
def my_pipe():
inp = fn.external_source(name="my_inp")
return inp
With the example pipeline above, you can provide ``"my_inp"`` input into the
:meth:`run()` function::
p = my_pipe(prefetch_queue_depth=1, ...)
p.build()
                p.run(my_inp=np.random.random((2, 3, 2)))
Such keyword argument specified in the :meth:`run()` function has to have a
corresponding input operator node declared in DALI pipeline.
As always when working with DALI, the value passed to the keyword argument has to
denote a whole batch of data.
Please note, that using this feature requires setting either ``prefetch_queue_depth=1``
or ``exec_pipelined=False`` in DALI Pipeline constructor.
This feature can be considered as a syntactic sugar over :meth:`feed_input` function.
Returns
-------
A list of `TensorList` objects for respective pipeline outputs
"""
if len(pipeline_inputs) > 0 and not self._are_pipeline_inputs_possible():
raise RuntimeError(f"""
When using pipeline_inputs named arguments, either
`prefetch_queue_depth` in Pipeline constructor shall be set to 1 (for both devices)
or `exec_pipelined` shall be set to False.
Received: prefetch_queue_depth={self.prefetch_queue_depth},
exec_pipelined={self.exec_pipelined}.
Please set the `prefetch_queue_depth` or `exec_pipelined` argument in the Pipeline
                constructor properly or provide inputs to the DALI Pipeline via other means
                (e.g. the `feed_input` function or the `source` argument of the
                `fn.external_source` operator).""")
for inp_name, inp_value in pipeline_inputs.items():
self.feed_input(inp_name, inp_value)
with self._check_api_type_scope(types.PipelineAPIType.BASIC):
self.schedule_run()
return self.outputs()
def _prefetch(self):
"""Executes pipeline to fill executor's pipeline."""
if not self._built:
raise RuntimeError("Pipeline must be built first.")
self._schedule_py_workers()
if self._exec_separated:
self._fill_separated_queues()
else:
for _ in range(self._prefetch_queue_depth):
self._run_once()
self._first_iter = False
def _run_once(self):
"""Start running the whole pipeline once without waiting for its results.
If the pipeline was created with `exec_async` option set to `True`,
this function will return without waiting for the execution to end."""
try:
if not self._last_iter:
self._iter_setup()
self._batches_to_consume += 1
# Special case to prevent a deadlock if user didn't release the only buffer
if not self._exec_async and self._prefetch_queue_depth == 1:
self.release_outputs()
self._run_cpu()
self._run_gpu()
except StopIteration:
self._last_iter = True
def _run_up_to(self, stage_name):
"""Call the `_run_X` up to `stage_name` (inclusive).
"""
try:
if not self._last_iter:
self._iter_setup()
self._batches_to_consume += 1
self._run_cpu()
if stage_name == "cpu":
return
self._run_gpu()
if stage_name == "gpu":
return
except StopIteration:
self._last_iter = True
def _schedule_py_workers(self):
if self._py_pool is None:
return
for i, group in enumerate(self._parallel_input_callbacks):
group.prefetch(self._py_pool, i, self._max_batch_size, self._epoch_idx)
def _fill_separated_queues(self):
"""When using separated execution fill each of the prefetch queues
"""
if not self._built:
raise RuntimeError("Pipeline must be built first.")
if not self._first_iter:
raise RuntimeError("Queues can be filled only on first iteration.")
if not self._exec_separated:
raise RuntimeError("This function should be only used with separated execution.")
for i in range(self._gpu_queue_size):
self._run_up_to("gpu")
for i in range(self._cpu_queue_size):
self._run_up_to("cpu")
def reset(self):
"""Resets pipeline iterator
If pipeline iterator reached the end then reset its state to the beginning.
"""
if self._last_iter:
self._first_iter = True
self._last_iter = False
self._iter = 0
self._epoch_idx += 1
if self._input_callbacks:
for group in self._input_callbacks:
group.reset_indices()
for i, group in enumerate(self._parallel_input_callbacks):
# iterators are not reset or their prefetch results discarded
# unless they have caused an exception
if not self._py_pool.is_iterable_group(group):
self._py_pool.reset_context(i)
def empty(self):
"""If there is any work scheduled in the pipeline but not yet consumed
"""
return self._batches_to_consume == 0
def serialize(self, define_graph=None, filename=None):
"""Serialize the pipeline to a Protobuf string.
        Additionally, you can pass a file name, so that the serialized pipeline will be
        written there. The file contents will be overwritten.
Parameters
----------
define_graph : callable
If specified, this function will be used instead of member :meth:`define_graph`.
This parameter must not be set, if the pipeline outputs are specified with
:meth:`set_outputs`.
filename : str
            File to which the serialized pipeline will be written.
kwargs : dict
Refer to Pipeline constructor for full list of arguments.
"""
if define_graph is not None and not callable(define_graph):
raise TypeError("Provided `define_graph` argument is not callable."
+ (" Didn't you want to write `.serialize(filename=...)`?"
if isinstance(define_graph, str) else ""))
if not self._py_graph_built:
self._build_graph(define_graph)
if not self._backend_prepared:
self._init_pipeline_backend()
self._pipe.SetOutputDescs(self._generate_build_args())
ret = self._pipe.SerializeToProtobuf()
if filename is not None:
with open(filename, 'wb') as pipeline_file:
pipeline_file.write(ret)
return ret
@classmethod
def deserialize(cls, serialized_pipeline=None, filename=None, **kwargs):
"""Deserialize and build pipeline.
Deserialize pipeline, previously serialized with ``serialize()`` method.
Returned pipeline is already built.
Alternatively, additional arguments can be passed, which will be used when instantiating
the pipeline. Refer to Pipeline constructor for full list of arguments. By default,
the pipeline will be instantiated with the arguments from serialized pipeline.
        Note that the ``serialized_pipeline`` and ``filename`` parameters are mutually exclusive.
Parameters
----------
serialized_pipeline : str
Pipeline, serialized using ``serialize()`` method.
filename : str
File, from which serialized pipeline will be read.
kwargs : dict
Refer to Pipeline constructor for full list of arguments.
Returns
        -------
Deserialized and built pipeline.
"""
kw = kwargs
if (serialized_pipeline is None) == (filename is None): # XNOR
raise ValueError("serialized_pipeline and filename arguments are mutually exclusive. "
"Precisely one of them should be defined.")
pipeline = cls()
if filename is not None:
with open(filename, 'rb') as pipeline_file:
serialized_pipeline = pipeline_file.read()
pipeline._pipe = b.Pipeline(serialized_pipeline,
kw.get("batch_size", -1),
kw.get("num_threads", -1),
kw.get("device_id", -1),
kw.get("exec_pipelined", True),
kw.get("prefetch_queue_depth", 2),
kw.get("exec_async", True),
kw.get("bytes_per_sample", 0),
kw.get("set_affinity", False),
kw.get("max_streams", -1),
kw.get("default_cuda_stream_priority", 0))
if pipeline.device_id != types.CPU_ONLY_DEVICE_ID:
b.check_cuda_runtime()
pipeline._pipe.SetExecutionTypes(pipeline._exec_pipelined, pipeline._exec_separated,
pipeline._exec_async)
pipeline._pipe.SetQueueSizes(pipeline._cpu_queue_size, pipeline._gpu_queue_size)
pipeline._pipe.EnableExecutorMemoryStats(pipeline._enable_memory_stats)
pipeline._backend_prepared = True
pipeline._pipe.Build()
pipeline._built = True
pipeline._deserialized = True
pipeline._max_batch_size = kw.get("batch_size", -1)
pipeline._num_threads = kw.get("num_threads", -1)
pipeline._device_id = kw.get("device_id", -1)
pipeline._exec_pipelined = kw.get("exec_pipelined", True)
pipeline._prefetch_queue_depth = kw.get("prefetch_queue_depth", 2)
pipeline._exec_async = kw.get("exec_async", True)
pipeline._bytes_per_sample = kw.get("bytes_per_sample", 0)
pipeline._set_affinity = kw.get("set_affinity", False)
pipeline._max_streams = kw.get("max_streams", -1)
pipeline._default_cuda_stream_priority = kw.get("default_cuda_stream_priority", 0)
return pipeline
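    # A minimal serialize/deserialize round-trip sketch (`my_pipe` stands for a
    # defined pipeline; "pipe.proto" is an arbitrary file name):
    #
    #   my_pipe.serialize(filename="pipe.proto")
    #   restored = Pipeline.deserialize(filename="pipe.proto")  # already built
    #   outputs = restored.run()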
def deserialize_and_build(self, serialized_pipeline):
"""Deserialize and build the pipeline given in serialized form.
Parameters
----------
serialized_pipeline : str
Serialized pipeline.
"""
self._pipe = b.Pipeline(serialized_pipeline,
self._max_batch_size,
self._num_threads,
self._device_id,
self._exec_pipelined,
self._prefetch_queue_depth,
self._exec_async,
self._bytes_per_sample,
self._set_affinity,
self._max_streams,
self._default_cuda_stream_priority)
self._pipe.SetExecutionTypes(self._exec_pipelined, self._exec_separated, self._exec_async)
self._pipe.SetQueueSizes(self._cpu_queue_size, self._gpu_queue_size)
self._pipe.EnableExecutorMemoryStats(self._enable_memory_stats)
self._backend_prepared = True
self._pipe.Build()
self._built = True
self._deserialized = True
def save_graph_to_dot_file(self, filename, show_tensors=False, show_ids=False,
use_colors=False):
"""Saves the pipeline graph to a file.
Parameters
----------
filename : str
Name of the file to which the graph is written.
show_tensors : bool
Show the Tensor nodes in the graph (by default only Operator nodes are shown)
show_ids : bool
Add the node id to the graph representation
use_colors : bool
            Whether to use colors to distinguish the stages.
"""
if not self._built:
raise RuntimeError("Pipeline must be built first.")
self._pipe.SaveGraphToDotFile(filename, show_tensors, show_ids, use_colors)
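    # A minimal usage sketch (assumes a built pipeline `pipe`; "graph.dot" is an
    # arbitrary output file name):
    #
    #   pipe.save_graph_to_dot_file("graph.dot", show_tensors=True)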
def set_outputs(self, *output_data_nodes):
"""Set the outputs of the pipeline.
Use of this function is an alternative to overriding `define_graph` in a derived class.
Args
----
`*output_data_nodes` : unpacked list of :class:`DataNode` objects
The outputs of the pipeline
"""
self._graph_out = output_data_nodes
def define_graph(self):
"""This function is defined by the user to construct the
graph of operations for their pipeline.
It returns a list of outputs created by calling DALI Operators."""
raise NotImplementedError
def _run_input_callbacks(self):
if self._input_callbacks is None:
return
batches = [] # data from external source callbacks is gathered here
stop_iter = False
for i, group in enumerate(self._parallel_input_callbacks):
try:
batches.append(
group.schedule_and_receive(self, self._py_pool, i, self._max_batch_size,
self._epoch_idx))
except StopIteration:
stop_iter = True
for group in self._seq_input_callbacks:
try:
batches.append(group.get_batch(self, self._max_batch_size, self._epoch_idx))
except StopIteration:
stop_iter = True
if stop_iter:
raise StopIteration()
# we only fill external source queues when we know that all callbacks succeeded
for batch in batches:
batch.feed()
def _iter_setup(self):
self._run_input_callbacks()
self.iter_setup()
self._iter += 1
def iter_setup(self):
"""This function can be overriden by user-defined
pipeline to perform any needed setup for each iteration.
For example, one can use this function to feed the input
data from NumPy arrays."""
pass
def _generate_build_args(self):
num_outputs = len(self._names_and_devices)
dtypes = [self._output_dtype] * num_outputs if type(
self._output_dtype) is not list else self._output_dtype
ndims = [self._output_ndim] * num_outputs if type(
self._output_ndim) is not list else self._output_ndim
if not (len(dtypes) == len(ndims) == num_outputs):
raise RuntimeError(
f"Lengths of provided output descriptions do not match. \n"
f"Expected num_outputs={num_outputs}."
f"\nReceived:\noutput_dtype={dtypes}\noutput_ndim={ndims}"
)
return [(name, dev, types.NO_TYPE if dtype is None else dtype, -1 if ndim is None else ndim)
for (name, dev), dtype, ndim in zip(self._names_and_devices, dtypes, ndims)]
def _discriminate_args(func, **func_kwargs):
"""Split args on those applicable to Pipeline constructor and the decorated function."""
func_argspec = inspect.getfullargspec(func)
ctor_argspec = inspect.getfullargspec(Pipeline.__init__)
if 'debug' not in func_argspec.args and 'debug' not in func_argspec.kwonlyargs:
func_kwargs.pop('debug', False)
if ('enable_conditionals' not in func_argspec.args
and 'enable_conditionals' not in func_argspec.kwonlyargs):
func_kwargs.pop('enable_conditionals', False)
ctor_args = {}
fn_args = {}
if func_argspec.varkw is not None:
raise TypeError(
f"Using variadic keyword argument `**{func_argspec.varkw}` in a "
f"graph-defining function is not allowed.")
for farg in func_kwargs.items():
is_ctor_arg = farg[0] in ctor_argspec.args or farg[0] in ctor_argspec.kwonlyargs
is_fn_arg = farg[0] in func_argspec.args or farg[0] in func_argspec.kwonlyargs
if is_fn_arg:
fn_args[farg[0]] = farg[1]
if is_ctor_arg:
print(
f"Warning: the argument `{farg[0]}` shadows a Pipeline constructor "
"argument of the same name.")
elif is_ctor_arg:
ctor_args[farg[0]] = farg[1]
else:
assert False, f"This shouldn't happen. Please double-check the `{farg[0]}` argument"
return ctor_args, fn_args
def _regroup_args(func, pipeline_def_kwargs, fn_call_kwargs):
"""Regroup arguments that are directed into Pipeline object construction (Pipeline kwargs)
and those that are passed into pipeline definition function (Function kwargs).
Parameters
----------
func : Callable
The pipeline definition function that is decorated.
pipeline_def_kwargs : Dict
Kwargs passed to the @pipeline_def
fn_call_kwargs : Dict
Kwargs passed when invoking the decorated function
Returns
-------
(Dict, Dict)
Pipeline kwargs, Function kwargs
"""
ctor_args, fn_kwargs = _discriminate_args(func, **fn_call_kwargs)
pipeline_kwargs = {**pipeline_def_kwargs, **ctor_args} # Merge and overwrite dict
return pipeline_kwargs, fn_kwargs
def _preprocess_pipe_func(func, conditionals_on):
"""Transform the pipeline definition function if the conditionals are enabled
"""
if conditionals_on:
return _conditionals._autograph.to_graph(func)
else:
return func
def _preprocess_pipe_object(pipe, conditionals_on, args, fn_kwargs):
"""Based on the conditional mode status, preprocess the pipeline object before the graph
is created.
"""
if conditionals_on:
# We push and pop manually to be compatible with _PipelineDebug
try:
Pipeline.push_current(pipe)
pipe._conditionals_enabled = True
pipe._condition_stack = _conditionals._ConditionStack()
# Add all parameters to the pipeline as "know" nodes in the top scope.
for arg in args:
if isinstance(arg, DataNode):
_conditionals.register_data_nodes(arg)
for _, arg in fn_kwargs.items():
if isinstance(arg, DataNode):
_conditionals.register_data_nodes(arg)
finally:
Pipeline.pop_current()
def _generate_graph(pipe, func, fn_args, fn_kwargs):
"""Build the graph provided by pipeline definition in `func` within the `pipe`.
Parameters
----------
pipe : Pipeline
Target pipeline object
func : Callable
The pipeline definition that is decorated
fn_args : List
Positional arguments to `func`
fn_kwargs : Dict
Kwargs to `func`
"""
with pipe:
pipe_outputs = func(*fn_args, **fn_kwargs)
if isinstance(pipe_outputs, tuple):
po = pipe_outputs
elif pipe_outputs is None:
po = ()
else:
po = (pipe_outputs, )
pipe.set_outputs(*po)
def pipeline_def(fn=None, *, enable_conditionals=False, **pipeline_kwargs):
"""
Decorator that converts a graph definition function into a DALI pipeline factory.
A graph definition function is a function that returns intended pipeline outputs.
You can decorate this function with ``@pipeline_def``::
@pipeline_def
def my_pipe(flip_vertical, flip_horizontal):
''' Creates a DALI pipeline, which returns flipped and original images '''
data, _ = fn.readers.file(file_root=images_dir)
img = fn.decoders.image(data, device="mixed")
flipped = fn.flip(img, horizontal=flip_horizontal, vertical=flip_vertical)
return flipped, img
The decorated function returns a DALI Pipeline object::
pipe = my_pipe(True, False)
# pipe.build() # the pipeline is not configured properly yet
A pipeline requires additional parameters such as batch size, number of worker threads,
GPU device id and so on (see :meth:`nvidia.dali.Pipeline()` for a
complete list of pipeline parameters).
These parameters can be supplied as additional keyword arguments,
passed to the decorated function::
pipe = my_pipe(True, False, batch_size=32, num_threads=1, device_id=0)
pipe.build() # the pipeline is properly configured, we can build it now
The outputs from the original function became the outputs of the Pipeline::
flipped, img = pipe.run()
When some of the pipeline parameters are fixed, they can be specified by name in the decorator::
@pipeline_def(batch_size=42, num_threads=3)
def my_pipe(flip_vertical, flip_horizontal):
...
Any Pipeline constructor parameter passed later when calling the decorated function will
override the decorator-defined params::
@pipeline_def(batch_size=32, num_threads=3)
def my_pipe():
data = fn.external_source(source=my_generator)
return data
pipe = my_pipe(batch_size=128) # batch_size=128 overrides batch_size=32
.. warning::
The arguments of the function being decorated can shadow pipeline constructor arguments -
in which case there's no way to alter their values.
.. note::
        Using ``**kwargs`` (variadic keyword arguments) in a graph-defining function is not
        allowed.
They may result in unwanted, silent hijacking of some arguments of the same name by
Pipeline constructor. Code written this way would cease to work with future versions of DALI
when new parameters are added to the Pipeline constructor.
To access any pipeline arguments within the body of a ``@pipeline_def`` function, the function
:meth:`nvidia.dali.Pipeline.current()` can be used::
@pipeline_def()
def my_pipe():
pipe = Pipeline.current()
batch_size = pipe.batch_size
num_threads = pipe.num_threads
...
pipe = my_pipe(batch_size=42, num_threads=3)
...
Keyword args
------------
enable_conditionals : bool, optional
Enable support for conditional execution of DALI operators using ``if`` statements
in the pipeline definition, by default False.
"""
def actual_decorator(func):
@functools.wraps(func)
def create_pipeline(*args, **kwargs):
conditionals_on = kwargs.get('enable_conditionals', enable_conditionals)
pipe_func = _preprocess_pipe_func(func, conditionals_on)
pipeline_args, fn_kwargs = _regroup_args(pipe_func, pipeline_kwargs, kwargs)
pipe = Pipeline(**pipeline_args)
_preprocess_pipe_object(pipe, conditionals_on, args, fn_kwargs)
_generate_graph(pipe, pipe_func, args, fn_kwargs)
return pipe
# Add `is_pipeline_def` attribute to the function marked as `@pipeline_def`
create_pipeline._is_pipeline_def = True
return create_pipeline
return actual_decorator(fn) if fn else actual_decorator
def _collect_ops(output_nodes):
"""
Traverses the pipeline graph starting from the outputs to collect all reachable operators.
Returns the list of operators topologically sorted, so that operators that contribute
as inputs to another operator go first.
"""
def get_source_op(edge: DataNode):
source_op = edge.source
if source_op is None:
raise RuntimeError("Pipeline encountered an Edge with no source op.")
return source_op
def get_op_input_edges(op) -> List[DataNode]:
for inp in op.inputs:
if isinstance(inp, list):
yield from inp
else:
yield inp
def get_op_outputs_num():
        # BFS-traverse the graph first to learn, for each reachable operator in the graph,
# how many data-nodes/edges the operator contributes to
# (i.e. the number of outputs of the operator instance)
op_outputs_num = {}
edges = deque(output_nodes)
while edges:
current_edge = edges.popleft()
source_op = get_source_op(current_edge)
if source_op.id in op_outputs_num:
op_outputs_num[source_op.id] += 1
else:
op_outputs_num[source_op.id] = 1
source_op.check_args()
edges.extend(get_op_input_edges(source_op))
return op_outputs_num
ops = []
edges = deque(output_nodes)
op_total_outputs_num = get_op_outputs_num()
op_visited_outputs_num = {op_id: 0 for op_id in op_total_outputs_num}
while edges:
current_edge = edges.popleft()
source_op = get_source_op(current_edge)
op_visited_outputs_num[source_op.id] += 1
# Actually visit the operator only when all the nodes it contributes to
# were already processed
if op_visited_outputs_num[source_op.id] == op_total_outputs_num[source_op.id]:
ops.append(source_op)
edges.extend(get_op_input_edges(source_op))
ops.reverse()
return ops
def _pipeline_def_experimental(fn=None, *, enable_conditionals=False, **pipeline_kwargs):
"""Variant of :meth:`@pipeline_def <nvidia.dali.pipeline_def>` decorator that enables additional
experimental features. It has the same API as its non-experimental variant with the addition of
the keyword arguments listed below.
Keyword args
------------
debug : bool, optional
Enable pipeline debug mode - allowing for step-by-step execution and intermediate data
inspection of the pipeline definition, by default False.
.. note::
This mode is intended only for debugging purposes - the pipeline performance will be
significantly worse than the non-debug mode.
.. note::
The features enabled by this decorator are experimental. The API may change and the
functionality may be limited.
"""
from nvidia.dali._debug_mode import _PipelineDebug
pipeline_debug = pipeline_kwargs.pop('debug', False)
def actual_decorator(func):
@functools.wraps(func)
def create_pipeline(*args, **kwargs):
debug_mode_on = kwargs.get('debug', pipeline_debug)
conditionals_on = kwargs.get('enable_conditionals', enable_conditionals)
pipe_func = _preprocess_pipe_func(func, conditionals_on)
pipeline_args, fn_kwargs = _regroup_args(pipe_func, pipeline_kwargs, kwargs)
if debug_mode_on:
pipe = _PipelineDebug(functools.partial(pipe_func, *args, **fn_kwargs),
**pipeline_args)
else:
pipe = Pipeline(**pipeline_args)
_preprocess_pipe_object(pipe, conditionals_on, args, fn_kwargs)
if not debug_mode_on:
_generate_graph(pipe, pipe_func, args, fn_kwargs)
return pipe
# Add `is_pipeline_def` attribute to the function marked as `@pipeline_def`
create_pipeline._is_pipeline_def = True
return create_pipeline
return actual_decorator(fn) if fn else actual_decorator
def _insert_experimental_pipeline_def():
current_module = sys.modules[__name__]
experimental_module = internal.get_submodule(current_module, 'experimental')
_pipeline_def_experimental.__module__ = experimental_module
setattr(experimental_module, 'pipeline_def', _pipeline_def_experimental)
_insert_experimental_pipeline_def()
|
DALI-main
|
dali/python/nvidia/dali/pipeline.py
|
# Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from nvidia.dali.backend_impl.tfrecord import * # noqa: F403 F401
except ImportError:
raise RuntimeError('DALI was not compiled with TFRecord support.'
' Use BUILD_PROTO3=ON CMake option to enable TFRecord support')
|
DALI-main
|
dali/python/nvidia/dali/tfrecord.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def _arithm_op(*args, **kwargs):
import nvidia.dali.ops
# Fully circular imports don't work. We need to import _arithm_op late and
# replace this trampoline function.
setattr(sys.modules[__name__], "_arithm_op", nvidia.dali.ops._arithm_op)
return nvidia.dali.ops._arithm_op(*args, **kwargs)
def sqrt(input):
"""Computes square root of values in ``input``.
:rtype: TensorList of sqrt(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("sqrt", input)
def rsqrt(input):
"""Computes reciprocal of the square root of values in ``input``.
:rtype: TensorList of rsqrt(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("rsqrt", input)
def cbrt(input):
"""Computes cube root of values in ``input``.
:rtype: TensorList of cbrt(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("cbrt", input)
def exp(input):
"""Computes exponential of values in ``input``.
:rtype: TensorList of exp(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("exp", input)
def log(input):
"""Computes natural logarithm (base-e) of values in ``input``.
:rtype: TensorList of log(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("log", input)
def log2(input):
"""Computes logarithm (base-2) of values in ``input``.
:rtype: TensorList of log2(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("log2", input)
def log10(input):
"""Computes logarithm (base-10) of values in ``input``.
:rtype: TensorList of log10(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("log10", input)
def abs(input):
"""Computes absolute value of values in ``input``.
:rtype: TensorList of abs(input). The type is preserved.
"""
return _arithm_op("abs", input)
def fabs(input):
"""Computes float absolute value of values in ``input``.
:rtype: TensorList of fabs(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("fabs", input)
def floor(input):
"""Computes floor of values in ``input``.
:rtype: TensorList of floor(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("floor", input)
def ceil(input):
"""Computes ceil of values in ``input``.
:rtype: TensorList of ceil(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("ceil", input)
def sin(input):
"""Computes sine of values in ``input``.
:rtype: TensorList of sin(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("sin", input)
def cos(input):
"""Computes cosine of values in ``input``.
:rtype: TensorList of cos(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("cos", input)
def tan(input):
"""Computes tangent of values in ``input``.
:rtype: TensorList of tan(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("tan", input)
def asin(input):
"""Computes arcus sine of values in ``input``.
:rtype: TensorList of asin(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("asin", input)
def acos(input):
"""Computes arcus cosine of values in ``input``.
:rtype: TensorList of acos(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("acos", input)
def atan(input):
"""Computes arcus tangent of values in ``input``.
:rtype: TensorList of atan(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("atan", input)
def sinh(input):
"""Computes hyperbolic sine of values in ``input``.
:rtype: TensorList of sinh(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("sinh", input)
def cosh(input):
"""Computes hyperbolic cosine of values in ``input``.
:rtype: TensorList of cosh(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("cosh", input)
def tanh(input):
"""Computes hyperbolic tangent of values in ``input``.
:rtype: TensorList of tanh(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("tanh", input)
def asinh(input):
"""Computes inverse hyperbolic sine of values in ``input``.
:rtype: TensorList of asinh(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("asinh", input)
def acosh(input):
"""Computes inverse hyperbolic cosine of values in ``input``.
:rtype: TensorList of acosh(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("acosh", input)
def atanh(input):
"""Computes inverse hyperbolic tangent of values in ``input``.
:rtype: TensorList of atanh(input). If input is an integer, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("atanh", input)
def min(left, right):
"""Computes minima of corresponding values in ``left`` and ``right``.
:rtype: TensorList of the type that is calculated based on the type promotion rules.
"""
return _arithm_op("min", left, right)
def max(left, right):
"""Computes maxima of corresponding values in ``left`` and ``right``.
:rtype: TensorList of the type that is calculated based on the type promotion rules.
"""
return _arithm_op("max", left, right)
def pow(base, exponent):
"""Computes base to the power of exponents, that is base ** exponent.
:rtype: TensorList of pow(base, exponent). Type is calculated based on the type promotion rules.
"""
return _arithm_op("pow", base, exponent)
def fpow(base, exponent):
"""Computes base to the power of exponents as floating point numbers.
:rtype: TensorList of pow(base, exponent). If all inputs are integers, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("fpow", base, exponent)
def atan2(x, y):
"""Computes arcus tangent of corresponding values in x / y.
:rtype: TensorList of atan2(x, y). If all inputs are integers, the result will be float,
otherwise the type is preserved.
"""
return _arithm_op("atan2", x, y)
def clamp(value, lo, hi):
"""Produces a tensor of values from ``value`` clamped to the range ``[lo, hi]``.
:rtype: TensorList of the type that is calculated based on the type promotion rules.
"""
return _arithm_op("clamp", value, lo, hi)
|
DALI-main
|
dali/python/nvidia/dali/math.py
|
# Copyright (c) 2017-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
import sys
from ._utils.hacks import not_iterable
def _arithm_op(*args, **kwargs):
import nvidia.dali.ops
# Fully circular imports don't work. We need to import _arithm_op late and
# replace this trampoline function.
setattr(sys.modules[__name__], "_arithm_op", nvidia.dali.ops._arithm_op)
return nvidia.dali.ops._arithm_op(*args, **kwargs)
class _NewAxis:
def __init__(self, name=None):
if name is not None:
if not isinstance(name, str):
raise TypeError("Axis name must be a single-character string")
if len(name) != 1:
raise ValueError("Axis name must be a single-character string")
self._name = name
@property
def name(self):
return self._name
def __call__(self, name=None):
return _NewAxis(name)
newaxis = _NewAxis()
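# A minimal indexing sketch (`data` stands for any DataNode; ``newaxis`` inserts
# a new dimension, optionally giving it a one-character layout name):
#
#   expanded = data[:, newaxis("C"), :]  # inserts a new axis named "C"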
class DataNode(object):
"""This class is a symbolic representation of a TensorList and is used at graph definition
stage. It does not carry actual data, but is used to define the connections between operators
and to specify the pipeline outputs. See documentation for :class:`Pipeline` for details.
``DataNode`` objects can be passed to DALI operators as inputs (and some of the named keyword
arguments) but they also provide arithmetic operations which implicitly create appropriate
operators that perform the expressions.
"""
def __init__(self, name, device="cpu", source=None):
self.name = name
self.device = device
self.source = source
def __str__(self):
return f'DataNode(name="{self.name}", device="{self.device}")'
__repr__ = __str__
# Note: Regardless of whether we want the cpu or gpu version
# of a tensor, we keep the source argument the same so that
# the pipeline can backtrack through the user-defined graph
def gpu(self):
from nvidia.dali import _conditionals
if _conditionals.conditionals_enabled():
# Treat it the same way as regular operator would behave
[self_split], _ = _conditionals.apply_conditional_split_to_args([self], {})
transferred_node = DataNode(self_split.name, "gpu", self_split.source)
_conditionals.register_data_nodes(transferred_node, [self])
return transferred_node
return DataNode(self.name, "gpu", self.source)
def __add__(self, other):
return _arithm_op("add", self, other)
def __radd__(self, other):
return _arithm_op("add", other, self)
def __sub__(self, other):
return _arithm_op("sub", self, other)
def __rsub__(self, other):
return _arithm_op("sub", other, self)
def __mul__(self, other):
return _arithm_op("mul", self, other)
def __rmul__(self, other):
return _arithm_op("mul", other, self)
def __pow__(self, other):
return _arithm_op("pow", self, other)
def __rpow__(self, other):
return _arithm_op("pow", other, self)
def __truediv__(self, other):
return _arithm_op("fdiv", self, other)
def __rtruediv__(self, other):
return _arithm_op("fdiv", other, self)
def __floordiv__(self, other):
return _arithm_op("div", self, other)
def __rfloordiv__(self, other):
return _arithm_op("div", other, self)
def __neg__(self):
return _arithm_op("minus", self)
    # Short-circuiting the execution, unary + is basically a no-op
def __pos__(self):
return self
def __eq__(self, other):
return _arithm_op("eq", self, other)
def __ne__(self, other):
return _arithm_op("neq", self, other)
def __lt__(self, other):
return _arithm_op("lt", self, other)
def __le__(self, other):
return _arithm_op("leq", self, other)
def __gt__(self, other):
return _arithm_op("gt", self, other)
def __ge__(self, other):
return _arithm_op("geq", self, other)
def __and__(self, other):
return _arithm_op("bitand", self, other)
def __rand__(self, other):
return _arithm_op("bitand", other, self)
def __or__(self, other):
return _arithm_op("bitor", self, other)
def __ror__(self, other):
return _arithm_op("bitor", other, self)
def __xor__(self, other):
return _arithm_op("bitxor", self, other)
def __rxor__(self, other):
return _arithm_op("bitxor", other, self)
def __bool__(self):
raise TypeError(
'"DataNode" was used in conditional context - it might have been used in truth'
" evaluation for `if` statement, logical expression or cast to a boolean."
" To use conditional execution via `if` statements you need to specify"
" `enable_conditionals=True` in `@nvidia.dali.pipeline_def` decorator."
" You can read more about conditional execution in specific section of the Pipeline"
" documentation. Bool conversion can be achieved with the `cast` operator."
)
def __getitem__(self, val):
idxs = []
new_axes = []
new_axis_names = []
# returns True if this index adds a new output dimension
def process_index(idx, dim):
if idx is None:
idxs.append((None, None, None, None))
return True
elif isinstance(idx, slice):
idxs.append((None, idx.start, idx.stop, idx.step))
return True
if isinstance(idx, _NewAxis):
new_axes.append(dim)
if idx.name is not None:
new_axis_names.append(idx.name)
return True
if idx is Ellipsis:
raise NotImplementedError("Ellipsis in indexing is not implemented")
            if isinstance(idx, (float, str)):
                raise TypeError(f"Invalid type for an index: {type(idx)}")
idxs.append((idx, None, None, None))
return False
if not isinstance(val, tuple):
val = (val, )
d = 0
for v in val:
if process_index(v, d):
d += 1
if len(new_axis_names) != 0:
if len(new_axis_names) != len(new_axes):
raise ValueError("New axis name must be specified for all axes or none.")
new_axis_names = "".join(new_axis_names)
else:
new_axis_names = None
slice_args = {}
for i, (at, lo, hi, step) in enumerate(idxs):
if at is not None:
slice_args["at_%i" % i] = at
if lo is not None:
slice_args["lo_%i" % i] = lo
if hi is not None:
slice_args["hi_%i" % i] = hi
if step is not None:
slice_args["step_%i" % i] = step
import nvidia.dali.fn
if len(slice_args) == 0:
# No true slicing arguments - only full range : and dali.newaxis.
# We need to ensure there are enough dimensions in the input for the number of
# full-range axes.
# If the last index is a newaxis, then ExpandDims will make sure that it makes sense.
# Otherwise we need to add an additional check.
if len(new_axes) > 0 and isinstance(val[-1], _NewAxis):
sliced = self # no check needed, ExpandDims will do the trick
else:
sliced = nvidia.dali.fn.subscript_dim_check(self, num_subscripts=len(idxs))
else:
sliced = nvidia.dali.fn.tensor_subscript(self, **slice_args, num_subscripts=len(idxs))
if len(new_axes) == 0:
return sliced
else:
return nvidia.dali.fn.expand_dims(sliced, axes=new_axes, new_axis_names=new_axis_names)
not_iterable(DataNode)
def _check(maybe_node):
if not isinstance(maybe_node, DataNode):
raise TypeError(f"Expected outputs of type compatible with \"DataNode\". "
f"Received output type with name \"{type(maybe_node).__name__}\" "
f"that does not match.")
|
DALI-main
|
dali/python/nvidia/dali/data_node.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# custom wrappers around ops
from nvidia.dali import backend as _b
from nvidia.dali import tensors as _tensors
from nvidia.dali import types as _types
from nvidia.dali._multiproc.messages import TaskArgs as _TaskArgs, SampleRange as _SampleRange
import nvidia.dali.types
from nvidia.dali._utils.external_source_impl import \
get_callback_from_source as _get_callback_from_source, \
accepted_arg_count as _accepted_arg_count, \
SourceKind as _SourceKind
def _get_batch_shape(data):
if isinstance(data, (list, tuple, _tensors.TensorListCPU, _tensors.TensorListGPU)):
if len(data) == 0:
return [], True
if callable(data[0].shape):
return [x.shape() for x in data], False
else:
return [x.shape for x in data], False
else:
shape = data.shape
if callable(shape):
shape = data.shape()
return [shape[1:]] * shape[0], True
def _check_data_batch(data, batch_size, layout):
shape, uniform = _get_batch_shape(data)
if len(shape) > batch_size:
raise RuntimeError(f"The external source callback returned an unexpected batch "
f"size. Expected batch_size <= {batch_size}, actual: {len(shape)}")
if len(shape) > 0:
dim = len(shape[0])
if not uniform:
for ts in shape:
if len(ts) != dim:
raise RuntimeError(
"All tensors in a batch must have the same number of dimensions")
if layout is not None and layout != "" and dim != len(layout):
raise RuntimeError(f"The layout '{layout}' cannot describe {dim}-dimensional data")
def _prep_data_for_feed_input(data, batch_size, layout, device_id=None):
def to_numpy(x):
if _types._is_mxnet_array(x):
return x.asnumpy()
elif _types._is_torch_tensor(x):
return x.numpy()
else:
return x
# __cuda_array_interface__ doesn't provide any way to pass the information about the device
# where the memory is located. It is assumed that the current device is the one that
# the memory belongs to, unless the user sets the device explicitly
# creating TensorGPU/TensorListGPU
if isinstance(data, (_tensors.TensorListCPU, _tensors.TensorListGPU)):
if layout is not None:
_check_data_batch(data, batch_size, layout)
data = type(data)(data, layout)
elif isinstance(data, list):
inputs = []
checked = False
for datum in data:
(is_dlpack, is_gpu_data) = _b.CheckDLPackCapsule(datum)
if not is_dlpack and not checked:
_check_data_batch(data, batch_size, layout)
checked = True
if isinstance(datum, (_tensors.TensorCPU, _tensors.TensorGPU)):
inp = type(datum)(datum, layout=layout) if layout is not None else datum
elif is_dlpack:
if is_gpu_data:
inp = _tensors.TensorGPU(datum, layout or "")
else:
inp = _tensors.TensorCPU(datum, layout or "")
elif hasattr(datum, "__cuda_array_interface__"):
array_device_id = _types._get_device_id_for_array(datum)
if array_device_id is None:
array_device_id = device_id
inp = _tensors.TensorGPU(datum, layout or "", array_device_id)
else:
datum = to_numpy(datum)
inp = _tensors.TensorCPU(datum, layout or "")
inputs.append(inp)
        assert all(isinstance(inp, type(inputs[0])) for inp in inputs), \
            "Mixed input types are not supported; all inputs must reside on the CPU or all on the GPU"
data = inputs
else:
(is_dlpack, is_gpu_data) = _b.CheckDLPackCapsule(data)
if not is_dlpack:
_check_data_batch(data, batch_size, layout)
if hasattr(data, "__cuda_array_interface__"):
array_device_id = _types._get_device_id_for_array(data)
if array_device_id is None:
array_device_id = device_id
data = _tensors.TensorListGPU(data, layout or "", array_device_id)
elif is_dlpack:
if is_gpu_data:
data = _tensors.TensorListGPU(data, layout or "")
else:
data = _tensors.TensorListCPU(data, layout or "")
else:
data = to_numpy(data)
data = _tensors.TensorListCPU(data, layout or "")
return data
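# Note (editor's addition, hedged): the normalization above is what lets
# Pipeline.feed_input accept, among others, a list of NumPy arrays, DLPack
# capsules, objects exposing __cuda_array_interface__, or ready-made
# TensorList(CPU/GPU) objects. A usage sketch ("data" is a hypothetical
# node name):
#
#     pipe.feed_input("data",
#                     [np.zeros((2, 2), dtype=np.float32)] * batch_size,
#                     layout="HW")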
class _ExternalDataBatch:
def __init__(self, group, pipeline, data, batch_size):
self._group = group
        self._pipeline = pipeline
self._data = data
self._batch_size = batch_size
def feed(self):
        self._group.feed(self._pipeline, self._data, self._batch_size)
class _ExternalSourceGroup(object):
def __init__(self, callback, source_desc, is_multioutput, instances=[], *,
cuda_stream=None, use_copy_kernel=None, batch=True, parallel=False,
prefetch_queue_depth=None, bytes_per_sample_hint=None, batch_info=None):
self.instances = list(instances) # we need a copy!
self.utilized_instances = self.instances
self.is_multioutput = is_multioutput
self.callback = callback
self.source_desc = source_desc
self._cuda_stream = cuda_stream
self.use_copy_kernel = use_copy_kernel
self.batch = batch
self.batch_info = batch_info
# Index of a batch within the epoch that will be returned from
# get_batch or schedule_and_receive call. Contrary to Pipeline's
# `epoch_idx` it is tracked separately by ExternalSourceGroup due to
# prefetching of batches in parallel mode.
self.current_iter = 0
self.current_sample = 0
self.parallel = parallel
self.prefetch_queue_depth = prefetch_queue_depth
self.bytes_per_sample_hint = bytes_per_sample_hint
if callback is not None:
arg_count = _accepted_arg_count(callback)
if arg_count not in [0, 1]:
raise TypeError("External source callback must be a callable with 0 or 1 argument")
self.accepts_arg = arg_count > 0
def append(self, instance):
self.instances.append(instance)
self.utilized_instances = self.instances
def disable_pruned_instances(self, pruned_mask):
if len(pruned_mask) != len(self.instances):
raise RuntimeError(
f"Mask of the pruned outputs of the external source must have the length matching "
f"the number of outputs of the external source. The external source node has "
f"{len(self.instances)} outputs, but received mask of length {len(pruned_mask)}.")
self.utilized_instances = [
instance for instance, is_pruned
in zip(self.instances, pruned_mask) if not is_pruned
]
def callback_args(self, idx_in_batch, epoch_idx, batch_size=0, lead=0):
"""Generate information to be passed to ES callback.
Args:
idx_in_batch: Index in batch for per-sample mode, None indicates batch mode where we
pass only the iteration number.
            lead: how many batches ahead of the actual iteration this job is
"""
if not self.accepts_arg:
return ()
if idx_in_batch is not None:
arg = nvidia.dali.types.SampleInfo(
self.current_sample + idx_in_batch + batch_size * lead, idx_in_batch,
self.current_iter + lead, epoch_idx)
elif self.batch_info:
arg = nvidia.dali.types.BatchInfo(self.current_iter + lead, epoch_idx)
else:
arg = self.current_iter + lead
return (arg, )
def reset_indices(self):
self.current_iter = 0
self.current_sample = 0
def prefetch(self, pool, context_i, batch_size, epoch_idx):
        # NOTE We can't schedule more than what's on top of pipeline's prefetch queue, as the
        # entries in the pipeline are zero-copy and cannot be overwritten.
context = pool.contexts[context_i]
while (context.scheduled_ahead < self.prefetch_queue_depth
and self.schedule_batch(pool, context_i, context.scheduled_ahead,
batch_size, epoch_idx)):
pass
def schedule_batch(self, pool, context_i, lead, batch_size, epoch_idx):
"""Schedule computing new batch from source callback by the parallel pool."""
if self.batch:
return pool.schedule_batch(
context_i, _TaskArgs.make_batch(self.callback_args(None, epoch_idx, lead=lead)))
else:
sample_range_start = self.current_sample + batch_size * lead
sample_range_end = sample_range_start + batch_size
iteration = self.current_iter + lead
sample_range = _SampleRange(sample_range_start, sample_range_end, iteration, epoch_idx)
work_batch = _TaskArgs.make_sample(sample_range)
return pool.schedule_batch(context_i, work_batch)
def schedule_and_receive(self, pipeline, pool, context_i, batch_size, epoch_idx):
"""Obtain the computed results of calling source callback in parallel pool and feed
the results to the ExternalSource nodes in `pipeline`.
Schedule the execution of the source callback in the pool to compute next batch.
Used by the parallel ExternalSource variant.
Args:
context_i (int): Index of the callback (in the list of parallel groups)"""
try:
callback_out = pool.receive_batch(context_i)
self.current_sample += batch_size
self.current_iter += 1
self.prefetch(pool, context_i, batch_size, epoch_idx)
return _ExternalDataBatch(self, pipeline, callback_out, batch_size)
except StopIteration:
self.reset_indices()
pool.reset_context(context_i)
raise
def get_batch(self, pipeline, batch_size, epoch_idx):
"""Call the source callback and feed the results to the ExternalSource nodes in `pipeline`.
Used for the sequential ExternalSource variant."""
try:
if self.batch:
callback_out = self.callback(*self.callback_args(None, epoch_idx))
else:
callback_out = [
self.callback(*self.callback_args(i, epoch_idx)) for i in range(batch_size)
]
self.current_sample += batch_size
self.current_iter += 1
except StopIteration:
self.reset_indices()
raise
return _ExternalDataBatch(self, pipeline, callback_out, batch_size)
def feed(self, pipeline, callback_out, batch_size):
"""Feed the `callback_out` data obtained from source to the ExternalSource nodes
in the `pipeline`"""
if self.is_multioutput:
for op in self.utilized_instances:
if self.batch:
data = callback_out[op._output_index]
else:
# extract a single output
data = [callback_out[i][op._output_index] for i in range(batch_size)]
pipeline._feed_input(op._name, data, op._layout, self._cuda_stream,
self.use_copy_kernel)
else:
data = callback_out
op = self.utilized_instances[0]
pipeline._feed_input(
op._name, data, op._layout, self._cuda_stream, self.use_copy_kernel)
class ExternalSource():
"""ExternalSource is a special operator that can provide data to a DALI pipeline
from Python by several methods.
The simplest and preferred way is to specify a ``source``, which can be a callable or iterable.
.. note::
:meth:`nvidia.dali.fn.external_source` operator is partially compatible with TensorFlow
integration via :meth:`nvidia.dali.plugin.tf.experimental.DALIDatasetWithInputs`.
Please refer to its documentation for details.
.. note::
To return a batch of copies of the same tensor, use :func:`nvidia.dali.types.Constant`,
which is more performant.
"""
_args_doc = """
Args
----
`source` : callable or iterable
The source of the data.
The source is polled for data (via a call ``source()`` or ``next(source)``)
when the pipeline needs input for the next iteration. Depending on the value of ``num_outputs``,
the source can supply one or more data items. The data item can be a whole batch (default) or
a single batch entry (when ``batch==False``). If ``num_outputs`` is not set, the ``source``
is expected to return one item (a batch or a sample). If this value is specified (even if its
    value is 1), the data is expected to be a tuple or list, where each element corresponds to
respective return value of the external_source.
The data samples must be in one of the compatible array types:
* NumPy ndarray (CPU)
* MXNet ndarray (CPU)
* PyTorch tensor (CPU or GPU)
* CuPy array (GPU)
* objects implementing ``__cuda_array_interface__``
* DALI `Tensor` object
Batch sources must produce entire batches of data. This can be achieved either by adding a new
outermost dimension to an array or by returning a list of arrays (in which case they can be of
different size, but must have the same rank and element type). A batch source can also
produce a DALI `TensorList` object, which can be an output of another DALI pipeline.
A per-batch source may accept one positional argument. If it does, it is the index of current
iteration within epoch and consecutive calls will be ``source(0)``, ``source(1)``, and so on.
If `batch_info` is set to True, instance of :class:`nvidia.dali.types.BatchInfo` will be
passed to the source, instead of a plain index.
A per-sample source may accept one positional argument of type
:class:`nvidia.dali.types.SampleInfo`, which contains index of the sample in current epoch and
in the batch, as well as current iteration number.
If the source is a generator function, the function is invoked and treated as an iterable.
However, unlike a generator, the function can be used with ``cycle``. In this case, the function
will be called again when the generator reaches the end of iteration.
    For GPU inputs, it is the user's responsibility to modify the provided GPU memory content
only in the provided stream. DALI schedules a copy on this stream, and all work is properly
queued. If no stream is provided, DALI will use a default, with a best-effort approach at
correctness. See the ``cuda_stream`` argument documentation for more information.
`num_outputs` : int, optional
If specified, denotes the number of TensorLists that are produced by the source function.
If set, the operator returns a list of ``DataNode`` objects, otherwise a single ``DataNode``
object is returned.
Keyword Args
------------
`cycle`: string or bool, optional
Specifies if and how to cycle through the source.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - don't cycle; ``StopIteration`` is raised when
end of data is reached; this is the default behavior
* ``"quiet"`` or ``True`` - the data is repeated indefinitely,
* ``"raise"`` - when the end of data is reached, ``StopIteration`` is raised, but
the iteration is restarted on subsequent call.
This flag requires that the ``source`` is a collection, for example, an iterable object where
``iter(source)`` returns a fresh iterator on each call, or a generator function.
In the latter case, the generator function is called again when more data than was
yielded by the function is requested.
Specifying ``"raise"`` can be used with DALI iterators to create a notion of epoch.
`name` : str, optional
The name of the data node.
Used when feeding the data in ``iter_setup`` and can be omitted if
the data is provided by ``source``.
`layout` : :ref:`layout str<layout_str_doc>` or list/tuple thereof, optional
If provided, sets the layout of the data.
When ``num_outputs > 1``, the layout can be a list that contains a distinct layout for each
output. If the list has fewer than ``num_outputs`` elements, only the first
    outputs have their layout set; the remaining outputs have no layout set.
`dtype` : `nvidia.dali.types.DALIDataType` or list/tuple thereof, optional
Input data type.
When ``num_outputs > 1``, the ``dtype`` can be a list that contains a distinct value for each
output.
The operator will validate that the fetched data is of the provided type.
If the argument is omitted or :const:`DALIDataType.NO_TYPE` is passed, the operator will infer
the type from the provided data.
This argument will be required starting from DALI 2.0.
`ndim` : int or list/tuple thereof, optional
Number of dimensions in the input data.
When ``num_outputs > 1``, the ``ndim`` can be a list that contains a distinct value for each
output.
The dimensionality of the data provided to the operator will be verified against this value.
Number of dimensions can be also inferred from the ``layout`` argument if provided.
If the ``layout`` argument is provided, the ``ndim`` must match
the number of dimensions in the layout.
Specifying the input dimensionality will be required starting from DALI 2.0
`cuda_stream` : optional, ``cudaStream_t`` or an object convertible to ``cudaStream_t``,
such as ``cupy.cuda.Stream`` or ``torch.cuda.Stream``
The CUDA stream is used to copy data to the GPU or from a GPU source.
If this parameter is not set, a best-effort will be taken to maintain correctness. That is,
if the data is provided as a tensor/array from a recognized library such as CuPy or PyTorch,
the library's current stream is used. Although this approach works in typical scenarios,
with advanced use cases, and code that uses unsupported libraries, you might need to
explicitly supply the stream handle.
This argument has two special values:
* 0 - Use the default CUDA stream
* 1 - Use DALI's internal stream
If internal stream is used, the call to ``feed_input`` will block until the copy to internal
buffer is complete, since there's no way to synchronize with this stream to prevent
overwriting the array with new data in another stream.
`use_copy_kernel` : bool, optional
If set to True, DALI will use a CUDA kernel to feed the data
instead of cudaMemcpyAsync (default).
.. note::
This is applicable only when copying data to and from GPU memory.
`blocking` : bool, optional
Determines whether the external source should wait until data is available or just fail
when the data is not available.
`no_copy` : bool, optional
Determines whether DALI should copy the buffer when feed_input is called.
If set to True, DALI passes the user memory directly to the pipeline, instead of copying it.
    It is the user's responsibility to keep the buffer alive and unmodified until it is
consumed by the pipeline.
The buffer can be modified or freed again after the output of the relevant iterations
has been consumed. Effectively, it happens after Pipeline's ``prefetch_queue_depth`` or
``cpu_queue_depth * gpu_queue_depth`` (when they are not equal) iterations following
the ``feed_input`` call.
The memory location must match the specified ``device`` parameter of the operator.
For the CPU, the provided memory can be one contiguous buffer or a list of contiguous Tensors.
For the GPU, to avoid extra copy, the provided buffer must be contiguous. If you provide a list
of separate Tensors, there will be an additional copy made internally, consuming both memory
and bandwidth.
Automatically set to ``True`` when ``parallel=True``
`batch` : bool, optional
If set to True or None, the ``source`` is expected to produce an entire batch at once.
If set to False, the ``source`` is called per-sample.
Setting ``parallel`` to True automatically sets ``batch`` to False if it was not provided.
`batch_info` : bool, optional, default = False
Controls if a callable ``source`` that accepts an argument and returns batches should receive
:class:`~nvidia.dali.types.BatchInfo` instance or just an
integer representing the iteration number.
If set to False (the default), only the integer is passed. If ``source`` is not callable,
does not accept arguments or ``batch`` is set to False, setting this flag has no effect.
`parallel` : bool, optional, default = False
If set to True, the corresponding pipeline will start a pool of Python workers to run the
callback in parallel. You can specify the number of workers by passing ``py_num_workers``
into pipeline's constructor.
When ``parallel`` is set to True, samples returned by ``source`` must be
NumPy/MXNet/PyTorch CPU arrays or TensorCPU instances.
|
Acceptable sources depend on the value specified for ``batch`` parameter.
If ``batch`` is set to ``False``, the ``source`` must be:
* a callable (a function or an object with ``__call__`` method) that accepts
exactly one argument (:class:`~nvidia.dali.types.SampleInfo` instance
that represents the index of the requested sample).
If ``batch`` is set to ``True``, the ``source`` can be either:
* a callable that accepts exactly one argument (either :class:`~nvidia.dali.types.BatchInfo`
instance or an integer - see ``batch_info`` for details)
* an iterable,
* a generator function.
|
.. warning::
Irrespective of ``batch`` value, callables should be stateless - they should produce
requested sample or batch solely based on the
:class:`~nvidia.dali.types.SampleInfo`/:class:`~nvidia.dali.types.BatchInfo`
instance or index in batch, so that they can be run in parallel in a number of workers.
The ``source`` callback must raise a ``StopIteration`` when the end of the data is reached.
Note, that due to prefetching, the callback may be invoked with a few iterations past
the end of dataset - make sure it consistently raises a ``StopIteration`` in that case.
|
.. note::
Callable ``source`` can be run in parallel by multiple workers.
        For ``batch=True``, multiple batches can be prepared in parallel; with ``batch=False``,
        it is possible to parallelize computation within the batch.
        When ``batch=True``, the callables' performance might especially benefit from increasing
``prefetch_queue_depth`` so that a few next batches can be computed in parallel.
|
.. note::
        An iterator or a generator function will be assigned to a single worker that will
        iterate over it. The main advantage is execution in parallel to the main
        Python process, but due to its state it is not possible to calculate more
        than one batch at a time.
`repeat_last` : bool, optional, default = False
.. note::
This is an advanced setting that is usable mainly with Triton Inference Server
with decoupled models.
Normally, ``external_source`` consumes its input data and expects new ones to be fed in the
upcoming iteration. Setting ``repeat_last=True`` changes this behavior so that
``external_source`` will detect that no new data was fed between the previous pipeline run and
    the current one and will re-feed itself with the most recent data.
Setting ``repeat_last`` to `True` only makes sense in "push" mode, i.e. when the data is
actively provided by the user via a call to ``feed_input``. Enabling this option is incompatible
with specifying the ``source``, which makes the ``external_source`` operate in "pull" mode.
`prefetch_queue_depth` : int, optional, default = 1
When run in ``parallel=True`` mode, specifies the number of batches to be computed in
advance and stored in the internal buffer, otherwise parameter is ignored.
`bytes_per_sample_hint`: int, optional, default = None
If specified in ``parallel=True`` mode, the value serves as a hint when
calculating initial capacity of shared memory slots used by the worker processes to pass
parallel external source outputs to the pipeline. The argument is ignored in non-parallel mode.
Setting a value large enough to accommodate the incoming data can prevent DALI from
reallocation of shared memory during the pipeline's run. Furthermore, providing the
hint manually can prevent DALI from overestimating the necessary shared memory capacity.
The value must be a positive integer.
    Please note that the samples in shared memory are accompanied by some internal meta-data,
    so the actual demand for shared memory is slightly higher than just the size
    of the binary data produced by the external source. The actual meta-data size depends on
    a number of factors and, for example, may change between Python or DALI
    releases without notice.
Please refer to pipeline's ``external_source_shm_statistics`` for inspecting how much
shared memory is allocated for data produced by the pipeline's parallel external sources.
"""
def __init__(self, source=None, num_outputs=None, *, cycle=None, layout=None, dtype=None,
ndim=None, name=None, device="cpu", cuda_stream=None, use_copy_kernel=None,
batch=None, parallel=None, no_copy=None, prefetch_queue_depth=None,
bytes_per_sample_hint=None, batch_info=None, repeat_last=False, **kwargs):
self._schema = _b.GetSchema("ExternalSource")
self._spec = _b.OpSpec("ExternalSource")
self._device = device
self._layout = layout
self._dtype = dtype
self._ndim = ndim
self._cuda_stream = cuda_stream
self._use_copy_kernel = use_copy_kernel
import nvidia.dali.ops
kwargs, self._call_args = nvidia.dali.ops._separate_kwargs(kwargs)
callback, source_desc = _get_callback_from_source(source, cycle, batch_info or False)
if name is not None and num_outputs is not None:
raise ValueError("`num_outputs` is not compatible with named `ExternalSource`")
self._name = name
self._num_outputs = num_outputs
self._batch = batch
self._callback = callback
self._source_desc = source_desc
self._parallel = parallel
self._no_copy = no_copy
self._prefetch_queue_depth = prefetch_queue_depth
self._bytes_per_sample_hint = bytes_per_sample_hint
self._batch_info = batch_info
self._repeat_last = repeat_last
self._spec.AddArg("device", device)
self._spec.AddArg("repeat_last", repeat_last)
for key, value in kwargs.items():
self._spec.AddArg(key, value)
@property
def spec(self):
return self._spec
@property
def schema(self):
return self._schema
@property
def device(self):
return self._device
@property
def preserve(self):
return False
def __call__(self, *, source=None, cycle=None, name=None, layout=None, dtype=None, ndim=None,
cuda_stream=None, use_copy_kernel=None, batch=None, parallel=None, no_copy=None,
prefetch_queue_depth=None, bytes_per_sample_hint=None, batch_info=None,
repeat_last=False, **kwargs):
""
from nvidia.dali.ops import _OperatorInstance
if batch_info is None:
batch_info = self._batch_info or False
elif self._batch_info is not None:
raise ValueError("The argument ``batch_info`` already specified in constructor.")
if source is None:
if cycle is not None:
if self._callback:
raise ValueError(
"The argument ``cycle`` can only be specified if ``source`` is an"
"iterable object or a generator function specified in this call. "
"To cycle through an iterable specified in "
"``__init__``, set ``cycle`` there.")
else:
raise ValueError(
"The argument ``cycle`` can only be specified if ``source`` is a "
"reusable iterable or a generator function.")
callback = self._callback
source_desc = self._source_desc
else:
if self._callback is not None:
raise RuntimeError("``source`` already specified in constructor.")
callback, source_desc = _get_callback_from_source(source, cycle, self._batch_info)
# Keep the metadata for Pipeline inspection
self._source_desc = source_desc
if callback is not None and repeat_last:
raise ValueError("``repeat_last`` must not be set when using the ``source`` argument "
"It's usable only with manually fed ``external_source``.")
if parallel is None:
parallel = self._parallel or False
elif self._parallel is not None:
raise ValueError("The argument ``parallel`` already specified in constructor.")
if batch is None:
batch = self._batch
elif self._batch is not None:
raise ValueError("The argument ``batch`` already specified in constructor.")
# By default parallel is False, so batch will be True
if batch is None:
batch = not parallel
if prefetch_queue_depth is None:
prefetch_queue_depth = self._prefetch_queue_depth
elif self._prefetch_queue_depth is not None:
raise ValueError(
"The argument ``prefetch_queue_depth`` already specified in constructor.")
if bytes_per_sample_hint is None:
bytes_per_sample_hint = self._bytes_per_sample_hint
elif self._bytes_per_sample_hint is not None:
raise ValueError(
"The argument ``bytes_per_sample_hint`` already specified in constructor.")
if no_copy is None:
no_copy = self._no_copy
elif self._no_copy is not None:
raise ValueError("The argument ``no_copy`` already specified in constructor.")
if parallel:
if prefetch_queue_depth is None:
prefetch_queue_depth = 1
if no_copy is None:
no_copy = True
if not no_copy:
raise ValueError("The argument ``no_copy`` cannot be specified to False "
" when used with ``parallel=True``.")
if prefetch_queue_depth < 1:
raise ValueError(
"``prefetch_queue_depth`` must be a positive integer, got {}.".format(
prefetch_queue_depth))
if bytes_per_sample_hint is not None and bytes_per_sample_hint < 1:
raise ValueError(
f"``bytes_per_sample_hint`` must be a positive integer, "
f"got {bytes_per_sample_hint}."
)
if source_desc.kind == _SourceKind.CALLABLE:
if not source_desc.has_inputs:
raise TypeError(
"Callable passed to External Source in parallel mode "
"(when `parallel=True`) must accept exactly one argument: "
"`nvidia.dali.types.SampleInfo` if run with `batch=False` or "
"either `nvidia.dali.types.BatchInfo` or integer that "
"represents the index of the batch within the epoch if `batch=True`. "
"Got a callable that does not accept arguments instead.")
elif not batch:
if source_desc.kind == _SourceKind.ITERABLE:
what = "an iterable"
else:
what = "a generator function"
raise TypeError(
"Parallel external source with {} must be run in a batch mode "
"(specify `batch=True` in the external source definition and make sure "
"your source returns batches)".format(what))
else:
for kwarg_value, kwarg_name in ((prefetch_queue_depth, "prefetch_queue_depth"),
(bytes_per_sample_hint, "bytes_per_sample_hint")):
if kwarg_value is not None:
raise ValueError(f"The argument `{kwarg_name}` is valid only for "
"parallel external sources (when ``parallel`` is True).")
if self._layout is not None:
if layout is not None:
raise RuntimeError("``layout`` already specified in constructor.")
else:
layout = self._layout
if self._dtype is not None:
if dtype is not None:
raise RuntimeError("``dtype`` already specified in constructor.")
else:
dtype = self._dtype
if self._ndim is not None:
if ndim is not None:
raise RuntimeError("``ndim`` already specified in constructor.")
else:
ndim = self._ndim
if self._cuda_stream is not None:
if cuda_stream is not None:
raise RuntimeError("``cuda_stream`` already specified in constructor.")
else:
cuda_stream = self._cuda_stream
if self._use_copy_kernel is not None:
if use_copy_kernel is not None:
raise RuntimeError("``use_copy_kernel`` already specified in constructor.")
else:
use_copy_kernel = self._use_copy_kernel
if name is None:
name = self._name
else:
self._name = name
if name is not None and self._num_outputs is not None:
raise RuntimeError("``num_outputs`` is not compatible with named ``ExternalSource``.")
group_common_kwargs = {
'cuda_stream': cuda_stream,
'use_copy_kernel': use_copy_kernel,
'batch': batch,
'batch_info': batch_info,
'parallel': parallel,
'prefetch_queue_depth': prefetch_queue_depth,
'bytes_per_sample_hint': bytes_per_sample_hint
}
if self._num_outputs is not None:
outputs = []
kwargs = {"no_copy": no_copy}
group = _ExternalSourceGroup(callback, source_desc, True, **group_common_kwargs)
for i in range(self._num_outputs):
if dtype is not None:
if isinstance(dtype, (list, tuple)):
kwargs['dtype'] = dtype[i] if i < len(
dtype) else nvidia.dali.types.DALIDataType.NO_TYPE
else:
kwargs['dtype'] = dtype
if ndim is not None:
if isinstance(ndim, (list, tuple)):
kwargs['ndim'] = ndim[i] if i < len(ndim) else None
else:
kwargs['ndim'] = ndim
this_layout = None
if layout is not None:
if isinstance(layout, (list, tuple)):
this_layout = layout[i] if i < len(layout) else ""
else:
this_layout = layout
kwargs['layout'] = this_layout
op_instance = _OperatorInstance([], self, **kwargs)
op_instance._callback = callback
op_instance._output_index = i
op_instance._group = group
op_instance._layout = this_layout
op_instance._batch = batch
group.append(op_instance)
op_instance.generate_outputs()
outputs.append(op_instance.unwrapped_outputs)
return outputs
else:
if name is not None:
kwargs["name"] = name
if no_copy is not None:
kwargs["no_copy"] = no_copy
if dtype is not None:
kwargs['dtype'] = dtype
if ndim is not None:
kwargs['ndim'] = ndim
if layout is not None:
kwargs['layout'] = layout
op_instance = _OperatorInstance([], self, **kwargs)
op_instance._callback = callback
op_instance._output_index = None
op_instance._group = _ExternalSourceGroup(callback, source_desc, False, [op_instance],
**group_common_kwargs)
op_instance._layout = layout
op_instance._batch = batch
op_instance.generate_outputs()
return op_instance.unwrapped_outputs
__doc__ += _args_doc
__call__.__doc__ += _args_doc
def _is_external_source_with_callback(op_instance):
return isinstance(op_instance._op, ExternalSource) and op_instance._callback is not None
def _is_external_source(op_instance):
return isinstance(op_instance._op, ExternalSource)
def _has_external_source(pipeline):
if not pipeline._py_graph_built:
pipeline._build_graph()
for op in pipeline._ops:
if _is_external_source(op):
return True
return False
def external_source(source=None, num_outputs=None, *, cycle=None, name=None, device="cpu",
layout=None, dtype=None, ndim=None, cuda_stream=None, use_copy_kernel=None,
batch=True, repeat_last=False, **kwargs):
"""Creates a data node which is populated with data from a Python source.
The data can be provided by the ``source`` function or iterable, or it can be provided by
``pipeline.feed_input(name, data, layout, cuda_stream)`` inside ``pipeline.iter_setup``.
    In the case of GPU input, it is the user's responsibility to modify the
    provided GPU memory content only using the provided stream (DALI schedules a copy on it
    and all work is properly queued). If no stream is provided, feeding the input blocks until the
    provided memory is copied to the internal buffer.
.. note::
:meth:`nvidia.dali.fn.external_source` operator is partially compatible with TensorFlow
integration via :meth:`nvidia.dali.plugin.tf.experimental.DALIDatasetWithInputs`.
Please refer to its documentation for details.
.. note::
To return a batch of copies of the same tensor, use :func:`nvidia.dali.types.Constant`,
which is more performant.
"""
from nvidia.dali._debug_mode import _PipelineDebug
from nvidia.dali import _conditionals
def _external_source(source=None, num_outputs=None, *, cycle=None, name=None, device="cpu",
layout=None, dtype=None, ndim=None, cuda_stream=None, use_copy_kernel=None,
repeat_last=False, batch=True, **kwargs):
if batch is None:
batch = True
if num_outputs is not None:
if source is None:
raise ValueError(
"The parameter ``num_outputs`` is only valid when using ``source`` to "
"provide data. To feed multiple external sources in ``feed_input``, "
"use multiple ``external_source`` nodes.")
op = ExternalSource(device=device, num_outputs=num_outputs, source=source, cycle=cycle,
layout=layout, dtype=dtype, ndim=ndim, cuda_stream=cuda_stream,
use_copy_kernel=use_copy_kernel, batch=batch, repeat_last=repeat_last,
**kwargs)
return op(name=name)
# Wrapper around external_source to switch between standard and debug mode.
current_pipeline = _PipelineDebug.current()
if getattr(current_pipeline, '_debug_on', False):
result = current_pipeline._external_source(
source=source, num_outputs=num_outputs, cycle=cycle, name=name, device=device,
layout=layout, batch=batch, repeat_last=repeat_last, **kwargs)
else:
result = _external_source(source, num_outputs, cycle=cycle, name=name, device=device,
layout=layout, dtype=dtype, ndim=ndim, cuda_stream=cuda_stream,
use_copy_kernel=use_copy_kernel, batch=batch,
repeat_last=repeat_last, **kwargs)
if _conditionals.conditionals_enabled():
_conditionals.register_data_nodes(result)
return result
external_source.__doc__ += ExternalSource._args_doc
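if __name__ == "__main__":
    # Usage sketch (editor's addition, hedged): the two basic modes documented
    # above - a per-sample callable and a cycled batch iterable. Assumes a
    # working DALI installation; device_id=0 is an assumption.
    import numpy as np
    from nvidia.dali import pipeline_def

    @pipeline_def(batch_size=4, num_threads=1, device_id=0)
    def es_pipe():
        # per-sample callable: receives an nvidia.dali.types.SampleInfo
        per_sample = external_source(
            source=lambda info: np.int32([info.idx_in_epoch]), batch=False)
        # batch iterable, quietly rewound when exhausted (cycle=True)
        batches = [[np.int32([i])] * 4 for i in range(3)]
        per_batch = external_source(source=batches, cycle=True)
        return per_sample, per_batch

    pipe = es_pipe()
    pipe.build()
    samples, batch = pipe.run()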
|
DALI-main
|
dali/python/nvidia/dali/external_source.py
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def get_include_dir():
"""Get the path to the directory containing C++ header files.
Returns:
String representing the path to the include directory
"""
# Import inside the function to avoid circular import as dali imports sysconfig
import nvidia.dali as dali
return os.path.join(os.path.dirname(dali.__file__), 'include')
def get_lib_dir():
"""Get the path to the directory containing DALI library.
Returns:
String representing the path to the library directory
"""
import nvidia.dali as dali
return os.path.dirname(dali.__file__)
def get_include_flags():
"""Get the include flags for custom operators
Returns:
The compilation flags
"""
flags = []
flags.append('-I%s' % get_include_dir())
return flags
def get_compile_flags():
"""Get the compilation flags for custom operators
Returns:
The compilation flags
"""
import nvidia.dali.backend as b
flags = []
flags.append('-I%s' % get_include_dir())
flags.append('-D_GLIBCXX_USE_CXX11_ABI=%d' % b.GetCxx11AbiFlag())
return flags
def get_link_flags():
"""Get the link flags for custom operators
Returns:
The link flags
"""
flags = []
flags.append('-L%s' % get_lib_dir())
flags.append('-ldali')
return flags
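if __name__ == "__main__":
    # Usage sketch (editor's addition, hedged): composing a compiler command
    # for a custom-operator plugin from the helpers above. The source and
    # output file names are hypothetical.
    cmd = (["g++", "-std=c++17", "-shared", "-fPIC", "my_custom_op.cc",
            "-o", "libmy_custom_op.so"]
           + get_compile_flags() + get_link_flags())
    print(" ".join(cmd))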
|
DALI-main
|
dali/python/nvidia/dali/sysconfig.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import pickle
import io
from nvidia.dali import reducers
class _DaliPickle:
@staticmethod
def dumps(obj, protocol=None, **kwargs):
f = io.BytesIO()
reducers.DaliCallbackPickler(f, protocol, **kwargs).dump(obj)
return f.getvalue()
@staticmethod
def loads(s, **kwargs):
return pickle.loads(s, **kwargs)
class _CustomPickler:
@classmethod
def create(cls, py_callback_pickler):
if py_callback_pickler is None or isinstance(py_callback_pickler, cls):
return py_callback_pickler
if hasattr(py_callback_pickler, 'dumps') and hasattr(py_callback_pickler, 'loads'):
return cls.create_from_reducer(py_callback_pickler)
if isinstance(py_callback_pickler, (tuple, list)):
params = [None] * 3
for i, item in enumerate(py_callback_pickler):
params[i] = item
reducer, kwargs_dumps, kwargs_loads = params
return cls.create_from_reducer(reducer, kwargs_dumps, kwargs_loads)
raise ValueError("Unsupported py_callback_pickler value provided.")
@classmethod
def create_from_reducer(cls, reducer, dumps_kwargs=None, loads_kwargs=None):
return cls(reducer.dumps, reducer.loads, dumps_kwargs, loads_kwargs)
def __init__(self, dumps, loads, dumps_kwargs, loads_kwargs):
self._dumps = dumps
self._loads = loads
self.dumps_kwargs = dumps_kwargs or {}
self.loads_kwargs = loads_kwargs or {}
def dumps(self, obj):
return self._dumps(obj, **self.dumps_kwargs)
def loads(self, obj):
return self._loads(obj, **self.loads_kwargs)
def pickle_by_value(fun):
"""
Hints parallel external source to serialize a decorated global function by value
rather than by reference, which would be a default behavior of Python's pickler.
"""
if inspect.isfunction(fun):
setattr(fun, '_dali_pickle_by_value', True)
return fun
else:
raise TypeError("Only functions can be explicitely set to be pickled by value")
|
DALI-main
|
dali/python/nvidia/dali/pickling.py
|
# Copyright (c) 2018, 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import backend as b
from nvidia.dali import ops as ops
def load_library(library_path: str, global_symbols: bool = False):
"""Loads a DALI plugin, containing one or more operators.
Args:
library_path: Path to the plugin library (relative or absolute)
global_symbols: If ``True``, the library is loaded with ``RTLD_GLOBAL`` flag or equivalent;
otherwise ``RTLD_LOCAL`` is used. Some libraries (for example Halide) require being
loaded with ``RTLD_GLOBAL`` - use this setting if your plugin uses any such library.
Returns:
None.
Raises:
RuntimeError: when unable to load the library.
"""
b.LoadLibrary(library_path, global_symbols)
ops.Reload()
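if __name__ == "__main__":
    # Usage sketch (editor's addition, hedged): loading a plugin so that the
    # operators it registers become visible under nvidia.dali.fn and
    # nvidia.dali.ops. The library path and the operator name below are
    # hypothetical.
    load_library("./libmy_plugin.so")
    # After loading, the plugin's operator can be used like a built-in one:
    # import nvidia.dali.fn as fn
    # out = fn.my_custom_op(images)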
|
DALI-main
|
dali/python/nvidia/dali/plugin_manager.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from nvidia.dali import types
from nvidia.dali import tensors
import inspect
import functools
np = None
def import_numpy():
"""Import numpy lazily, need to define global `np = None` variable"""
global np
if np is None:
try:
import numpy as np
except ImportError:
raise RuntimeError('Could not import numpy. Please make sure you have numpy '
'installed before you use parallel mode.')
class SourceKind(Enum):
CALLABLE = 0
ITERABLE = 1
GENERATOR_FUNC = 2
class SourceDescription:
"""Keep the metadata about the source parameter that was originally passed
"""
def __init__(self, source, kind: SourceKind, has_inputs: bool, cycle: str, batch_info=False):
self.source = source
self.kind = kind
self.has_inputs = has_inputs
self.cycle = cycle
self.batch_info = batch_info
def __str__(self) -> str:
if self.kind == SourceKind.CALLABLE:
inputs = ("with" if self.has_inputs else "without")
return f"Callable source {inputs} inputs: `{self.source}`"
elif self.kind == SourceKind.ITERABLE:
return f"Iterable (or iterator) source: `{self.source}` with cycle: `{self.cycle}`."
else:
return f"Generator function source: `{self.source}` with cycle: `{self.cycle}`."
_tf_sample_error_msg = (
"Unsupported callback return type. Expected NumPy array, PyTorch or MXNet cpu tensors, "
"DALI TensorCPU representing sample. Got `{}` instead.")
_tf_batch_error_msg = (
"Unsupported callback return type. Expected NumPy array, PyTorch or MXNet cpu tensors, "
"DALI TensorCPU, list of those types or DALI TensorListCPU representing batch. "
"Got `{}` instead.")
_tf_uniform_error_msg = (
"Unsupported callback return value. TensorFlow requires that the batches produced by input "
"datasets or External Source `source` callback in batch mode (that is when batch=True) "
" are dense and uniform - this means that every sample has the same shape. Got `{}` instead.")
def assert_cpu_sample_data_type(sample, error_str="Unsupported callback return type. Got: `{}`."):
import_numpy()
if isinstance(sample, np.ndarray):
return True
if types._is_mxnet_array(sample):
if sample.context.device_type != 'cpu':
raise TypeError("Unsupported callback return type. "
"GPU tensors are not supported. Got an MXNet GPU tensor.")
return True
if types._is_torch_tensor(sample):
if sample.device.type != 'cpu':
raise TypeError("Unsupported callback return type. "
"GPU tensors are not supported. Got a PyTorch GPU tensor.")
return True
elif isinstance(sample, tensors.TensorCPU):
return True
raise TypeError(error_str.format(type(sample)))
def assert_cpu_batch_data_type(batch, error_str="Unsupported callback return type. Got: `{}`."):
import_numpy()
if isinstance(batch, tensors.TensorListCPU):
return True
elif isinstance(batch, list):
for sample in batch:
assert_cpu_sample_data_type(sample, error_str)
return True
elif assert_cpu_sample_data_type(batch, error_str):
        # Batch can be represented as a dense tensor
return True
else:
raise TypeError(error_str.format(type(batch)))
def sample_to_numpy(sample, error_str="Unsupported callback return type. Got: `{}`."):
import_numpy()
assert_cpu_sample_data_type(sample, error_str)
if isinstance(sample, np.ndarray):
return sample
if types._is_mxnet_array(sample):
if sample.context.device_type != 'cpu':
raise TypeError("Unsupported callback return type. "
"GPU tensors are not supported. Got an MXNet GPU tensor.")
return sample.asnumpy()
if types._is_torch_tensor(sample):
if sample.device.type != 'cpu':
raise TypeError("Unsupported callback return type. "
"GPU tensors are not supported. Got a PyTorch GPU tensor.")
return sample.numpy()
elif isinstance(sample, tensors.TensorCPU):
return np.array(sample)
raise TypeError(error_str.format(type(sample)))
def batch_to_numpy(
batch,
error_str="Unsupported callback return type. Got: `{}`.",
non_uniform_str="Uniform input is required (batch of tensors of equal shapes), got {}."):
import_numpy()
assert_cpu_batch_data_type(batch, error_str)
if isinstance(batch, tensors.TensorListCPU):
if not batch.is_dense_tensor():
raise ValueError(non_uniform_str.format(batch))
return batch.as_array()
elif isinstance(batch, list):
result = [sample_to_numpy(sample, error_str) for sample in batch]
first_shape = result[0].shape
for sample in result:
if first_shape != sample.shape:
raise ValueError(non_uniform_str.format(batch))
return np.stack(result)
else:
return sample_to_numpy(batch, error_str)
class _CycleIter:
def __init__(self, iterable, mode):
self.source = iterable
self.signaling = (mode == "raise")
def __iter__(self):
self.it = iter(self.source)
return self
def __next__(self):
try:
return next(self.it)
except StopIteration:
self.it = iter(self.source)
if self.signaling:
raise
else:
return next(self.it)
class _CycleGenFunc():
def __init__(self, gen_func, mode):
self.source = gen_func
self.signaling = (mode == "raise")
def __iter__(self):
self.it = iter(self.source())
return self
def __next__(self):
try:
return next(self.it)
except StopIteration:
self.it = iter(self.source())
if self.signaling:
raise
else:
return next(self.it)
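# Behavior sketch (editor's addition): how the two cycling modes implemented
# above differ. With mode == "quiet" the wrapper rewinds silently; with
# mode == "raise" it signals the end of each pass before rewinding.
#
#     it = iter(_CycleIter([1, 2], "quiet"))
#     [next(it) for _ in range(5)]   # -> [1, 2, 1, 2, 1]
#
#     it = iter(_CycleIter([1, 2], "raise"))
#     next(it), next(it)             # -> (1, 2)
#     next(it)                       # raises StopIteration, then rewinds
#     next(it)                       # -> 1 (fresh pass)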
def _is_generator_function(x):
"""Checks whether x is a generator function or a callable object
where __call__ is a generator function"""
if inspect.isgeneratorfunction(x):
return True
if isinstance(x, functools.partial):
return _is_generator_function(x.func)
if x is None or inspect.isfunction(x) or inspect.ismethod(x):
return False
call = getattr(x, "__call__", None)
if call is x:
return False
return _is_generator_function(call)
def _cycle_enabled(cycle):
if cycle is None:
return False
if cycle is False or cycle == "no":
return False
if cycle is True or cycle == "quiet" or cycle == "raise":
return True
raise ValueError("""Invalid value {} for the argument `cycle`. Valid values are
- "no", False or None - cycling disabled
- "quiet", True - quietly rewind the data
- "raise" - raise StopIteration on each rewind.""".format(repr(cycle)))
def accepted_arg_count(callable):
if not (inspect.isfunction(callable) or inspect.ismethod(callable)) \
and hasattr(callable, '__call__'):
callable = callable.__call__
if not inspect.ismethod(callable):
implicit_args = 0
else:
implicit_args = 1
callable = callable.__func__
return callable.__code__.co_argcount - implicit_args
def get_callback_from_source(source, cycle, batch_info=False):
"""Repack the source into a unified callback function. Additionally prepare
the SourceDescription.
`batch_info` is usable only with callables.
Returns
-------
callback, SourceDescription
"""
iterable = False
desc = None
if source is not None:
try:
if _cycle_enabled(cycle):
if inspect.isgenerator(source):
raise TypeError("Cannot cycle through a generator - if the generator is "
"a result of calling a generator function, "
"pass that function instead as `source`.")
if _is_generator_function(source):
# We got a generator function, each call returns new "generator iterator"
desc = SourceDescription(source, SourceKind.GENERATOR_FUNC, False, cycle)
iterator = iter(_CycleGenFunc(source, cycle))
else:
# We hopefully got an iterable, iter(source) should return new iterator.
# TODO(klecki): Iterators are self-iterable (they return self from `iter()`),
# add a check if we have iterable and not iterator here,
# so we can better support cycle.
desc = SourceDescription(source, SourceKind.ITERABLE, False, cycle)
iterator = iter(_CycleIter(source, cycle))
else:
# In non-cycling case, we go over the data once.
if _is_generator_function(source):
# If we got a generator, we extract the "generator iterator"
desc = SourceDescription(source, SourceKind.GENERATOR_FUNC, False, cycle)
source = source()
else:
desc = SourceDescription(source, SourceKind.ITERABLE, False, cycle)
# We try to use the iterable/iterator.
# If this is callable instead, we will throw an error containing 'not iterable'
# in the error message.
iterator = iter(source)
iterable = True
callback = lambda: next(iterator) # noqa E731
except TypeError as err:
if "not iterable" not in str(err):
raise err
if cycle is not None:
raise ValueError("The argument `cycle` can only be specified "
"if `source` is iterable")
if not callable(source):
raise TypeError("Source must be callable, "
"iterable or a parameterless generator function")
# We got a callable
desc = SourceDescription(source, SourceKind.CALLABLE,
accepted_arg_count(source) > 0, cycle, batch_info)
callback = source
else:
desc = None
callback = None
if not iterable and cycle:
raise ValueError("`cycle` argument is only valid for iterable `source`")
return callback, desc
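# Examples (editor's addition, hedged): how get_callback_from_source above
# classifies typical sources; each variant is repacked into a callback plus a
# SourceDescription (assumes `import numpy as np`):
#
#     cb, desc = get_callback_from_source([np.zeros(2)] * 4, cycle=None)
#     desc.kind                    # -> SourceKind.ITERABLE
#
#     cb, desc = get_callback_from_source(lambda si: np.zeros(2), cycle=None)
#     desc.kind, desc.has_inputs   # -> (SourceKind.CALLABLE, True)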
# TODO(klecki): Maybe keep this data here instead of doing the copy twice
def _inspect_data(data, is_batched):
# TODO(klecki): Add asserts for uniform input batches (as well as output batches)
if is_batched:
as_numpy = batch_to_numpy(data, _tf_batch_error_msg, non_uniform_str=_tf_uniform_error_msg)
if isinstance(as_numpy, list):
return as_numpy[0].dtype, (None,) * (as_numpy[0].ndim + 1)
else:
return as_numpy.dtype, (None,) * as_numpy.ndim
else:
as_numpy = sample_to_numpy(data, _tf_sample_error_msg)
return as_numpy.dtype, (None,) * as_numpy.ndim
def get_batch_iterable_from_callback(source_desc: SourceDescription):
"""Transform batch callback accepting one argument into an Iterable
"""
first = source_desc.source(types.BatchInfo(0, 0) if source_desc.batch_info else 0)
dtype, shape = _inspect_data(first, True)
class CallableBatchIterator:
first_value = first
def __init__(self):
self.iteration = 0
self.source = source_desc.source
def __iter__(self):
self.iteration = 0
return self
def __next__(self):
if self.iteration == 0 and CallableBatchIterator.first_value is not None:
result = CallableBatchIterator.first_value
CallableBatchIterator.first_value = None
else:
if source_desc.batch_info:
# There is no notion of epochs when iterating over DALI Dataset
# as the "raise" policy is not supported, so we use epoch 0 only.
argument = types.BatchInfo(self.iteration, 0)
else:
argument = self.iteration
result = self.source(argument)
self.iteration += 1
return batch_to_numpy(result, _tf_batch_error_msg,
non_uniform_str=_tf_uniform_error_msg)
return CallableBatchIterator, dtype, shape
def get_sample_iterable_from_callback(source_desc: SourceDescription, batch_size):
"""Transform sample callback accepting one argument into an Iterable
"""
first = source_desc.source(types.SampleInfo(0, 0, 0, 0))
dtype, shape = _inspect_data(first, False)
class CallableSampleIterator:
first_value = first
def __init__(self):
self.idx_in_epoch = 0
self.idx_in_batch = 0
self.iteration = 0
self.source = source_desc.source
def __iter__(self):
self.idx_in_epoch = 0
self.idx_in_batch = 0
self.iteration = 0
return self
def __next__(self):
if self.idx_in_epoch == 0 and CallableSampleIterator.first_value is not None:
result = CallableSampleIterator.first_value
CallableSampleIterator.first_value = None
else:
# There is no notion of epochs when iterating over DALI Dataset
# as the "raise" policy is not supported, so we use epoch 0 only.
idx = types.SampleInfo(self.idx_in_epoch, self.idx_in_batch, self.iteration, 0)
result = self.source(idx)
self.idx_in_epoch += 1
self.idx_in_batch += 1
if self.idx_in_batch == batch_size:
self.idx_in_batch = 0
self.iteration += 1
return sample_to_numpy(result, _tf_sample_error_msg)
return CallableSampleIterator, dtype, shape
def get_iterable_from_callback(source_desc: SourceDescription, is_batched):
    """Transform a callback that doesn't accept arguments into an iterable.
    """
first = source_desc.source()
dtype, shape = _inspect_data(first, is_batched)
class CallableIterator:
first_value = first
def __init__(self):
self.source = source_desc.source
def __iter__(self):
return self
def __next__(self):
if CallableIterator.first_value is not None:
result = CallableIterator.first_value
CallableIterator.first_value = None
else:
result = self.source()
if is_batched:
return batch_to_numpy(result, _tf_batch_error_msg,
non_uniform_str=_tf_uniform_error_msg)
else:
return sample_to_numpy(result, _tf_sample_error_msg)
return CallableIterator, dtype, shape
def get_iterable_from_iterable_or_generator(source_desc: SourceDescription, is_batched):
    """Wrap an iterable or generator function into another iterable while peeking the first
    element. If the source is a generator function, it must be called first.
    """
if source_desc.kind == SourceKind.GENERATOR_FUNC:
first_iter = iter(source_desc.source())
else:
first_iter = iter(source_desc.source)
first = next(first_iter)
dtype, shape = _inspect_data(first, is_batched)
class PeekFirstGenerator:
first_iterator = first_iter
first_value = first
def __init__(self):
self.source = source_desc.source
def __iter__(self):
if PeekFirstGenerator.first_iterator is not None:
self.it = PeekFirstGenerator.first_iterator
PeekFirstGenerator.first_iterator = None
else:
if source_desc.kind == SourceKind.GENERATOR_FUNC:
self.it = iter(source_desc.source())
else:
self.it = iter(source_desc.source)
return self
def __next__(self):
if PeekFirstGenerator.first_value is not None:
result = PeekFirstGenerator.first_value
PeekFirstGenerator.first_value = None
else:
result = next(self.it)
if is_batched:
return batch_to_numpy(result, _tf_batch_error_msg,
non_uniform_str=_tf_uniform_error_msg)
else:
return sample_to_numpy(result, _tf_sample_error_msg)
return PeekFirstGenerator, dtype, shape
def _get_generator_from_source_desc(source_desc: SourceDescription, batch_size, is_batched):
    """Based on the DALI source description, create a generator function with type and shape
    specification compatible with the TF Generator Dataset.
    Cycling is delegated to the dataset, as some cycling behaviours cannot be realized in TF.
"""
if source_desc.kind == SourceKind.CALLABLE:
if source_desc.has_inputs:
if is_batched:
return get_batch_iterable_from_callback(source_desc)
else:
return get_sample_iterable_from_callback(source_desc, batch_size)
else:
# No inputs, plain iteration
return get_iterable_from_callback(source_desc, is_batched)
else:
# Generator Func or iterable
return get_iterable_from_iterable_or_generator(source_desc, is_batched)
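# Hedged usage sketch (added for illustration, not part of the original file): one way the
# iterable class, dtype and shape returned above could be wired into TensorFlow. The import
# of `tensorflow` and the availability of a `source_desc` object are assumptions of this
# example, not something this module does itself.
def _example_dataset_from_source_desc(source_desc, batch_size=8, is_batched=True):
    import tensorflow as tf  # assumed available in this illustration
    iterable_cls, dtype, shape = _get_generator_from_source_desc(
        source_desc, batch_size, is_batched)
    # `shape` is a tuple of Nones, so the signature pins only the rank, not the extents.
    return tf.data.Dataset.from_generator(
        iterable_cls, output_signature=tf.TensorSpec(shape=shape, dtype=dtype))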
|
DALI-main
|
dali/python/nvidia/dali/_utils/external_source_impl.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/python/nvidia/dali/_utils/__init__.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from nvidia.dali import backend as _b
from nvidia.dali import internal as _internal
from nvidia.dali import ops as _ops
from nvidia.dali import tensors as _tensors
from nvidia.dali import types as _types
from nvidia.dali.external_source import _prep_data_for_feed_input
# Classification of eager operators. Operators not assigned to any class are exposed as stateless.
# If you created a new operator and it is not stateless you should add it to the appropriate set.
# You should also add a coverage test for it in `dali/test/python/test_eager_coverage.py`.
# Stateful operators - rely on the internal state (return different outputs across iterations).
_stateful_operators = {
'decoders__ImageRandomCrop',
'noise__Gaussian',
'noise__SaltAndPepper',
'noise__Shot',
'segmentation__RandomMaskPixel',
'segmentation__RandomObjectBBox',
'FastResizeCropMirror',
'Jitter',
'ROIRandomCrop',
'RandomBBoxCrop',
'RandomResizedCrop',
'ResizeCropMirror',
'random__CoinFlip',
'random__Normal',
'random__Uniform',
'BatchPermutation',
}
# Iterator operators - Python iterators of readers.
_iterator_operators = {
'experimental__readers__Video',
'readers__COCO',
'readers__Caffe',
'readers__Caffe2',
'readers__File',
'readers__MXNet',
'readers__NemoAsr',
'readers__Numpy',
'readers__Sequence',
'readers__TFRecord',
'readers__Video',
'readers__VideoResize',
'readers__Webdataset',
}
# Operators not exposed in the eager mode.
_excluded_operators = {
'readers__TFRecord',
'TFRecordReader',
'PythonFunction',
'DLTensorPythonFunction',
'TorchPythonFunction',
'NumbaFunction',
}
def _transform_data_to_tensorlist(data, batch_size, layout=None, device_id=-1):
data = _prep_data_for_feed_input(data, batch_size, layout, device_id)
if isinstance(data, list):
if isinstance(data[0], _tensors.TensorGPU):
data = _tensors.TensorListGPU(data, layout or "")
else:
data = _tensors.TensorListCPU(data, layout or "")
return data
class _Classification:
"""Classification of data's device and whether it is a batch.
Based on data type determines if data should be treated as a batch and with which device.
    If the type can be unambiguously recognized as a batch, it is treated as one.
This includes lists of supported tensor-like objects e.g. numpy arrays (the only list not
treated as a batch is a list of objects of primitive types), :class:`DataNodeDebug` and
TensorLists.
Args:
data: Data to be classified.
type_name (str): Representation of argument type (input or keyword).
        arg_constant_len (int): Only applicable for argument inputs that are of array type
            (e.g. numpy array). If -1, does not modify the data. For a positive value, works
            like :class:`ops.Constant` and repeats the data `arg_constant_len` times.
"""
def __init__(self, data, type_name, arg_constant_len=-1):
from nvidia.dali._debug_mode import DataNodeDebug
is_batch, device, extracted = self._classify_data(data, type_name, arg_constant_len)
self.is_batch = is_batch
self.device = device
self.data = extracted
self.was_data_node = isinstance(data, DataNodeDebug)
self.original = data
@staticmethod
def _classify_data(data, type_name, arg_constant_len):
"""Returns tuple (is_batch, device, unpacked data). """
from nvidia.dali._debug_mode import DataNodeDebug
def is_primitive_type(x):
return isinstance(x, (int, float, bool, str))
def classify_array_input(arr):
if _types._is_numpy_array(arr):
device = 'cpu'
elif _types._is_torch_tensor(arr):
device = 'gpu' if arr.is_cuda else 'cpu'
elif _types._is_mxnet_array(arr):
device = 'gpu' if 'gpu' in str(arr.context) else 'cpu'
else:
raise RuntimeError(f"Unsupported array type '{type(arr)}'.")
return False, device, arr
def classify_array_kwarg(arr):
if _types._is_torch_tensor(arr):
if arr.is_cuda:
arr = arr.cpu().numpy()
elif _types._is_mxnet_array(arr):
import mxnet as mx
if 'gpu' in str(arr.context):
arr = arr.copyto(mx.cpu())
elif not _types._is_numpy_array(arr):
raise RuntimeError(f"Unsupported array type '{type(arr)}'.")
arr = _types._preprocess_constant_array_type(arr)
arr = _tensors.TensorListCPU([_tensors.TensorCPU(arr)] * arg_constant_len)
return True, 'cpu', arr
if isinstance(data, list):
if len(data) == 0 or any([is_primitive_type(d) for d in data]):
return False, 'cpu', data
is_batch_list = []
device_list = []
data_list = []
for d in data:
is_batch, device, val = _Classification._classify_data(d, type_name, -1)
is_batch_list.append(is_batch)
device_list.append(device)
data_list.append(val)
if any([device != device_list[0] for device in device_list]):
raise RuntimeError(f'{type_name} has batches of data on CPU and on GPU, '
'which is not supported.')
if all(is_batch_list):
# Input set.
return is_batch_list, device_list[0], data_list
if not any(is_batch_list):
# Converting to TensorList.
return True, device_list[0], _transform_data_to_tensorlist(
data_list, len(data_list))
else:
raise RuntimeError(f'{type_name} has inconsistent batch classification.')
else:
if isinstance(data, DataNodeDebug):
return True, data.device, data.get()
if isinstance(data, _tensors.TensorListCPU):
return True, 'cpu', data
if isinstance(data, _tensors.TensorListGPU):
return True, 'gpu', data
if is_primitive_type(data) or isinstance(data, _tensors.TensorCPU):
return False, 'cpu', data
if _types._is_compatible_array_type(data):
if arg_constant_len > 0:
                    # For a call argument input, repeat the data `arg_constant_len` times
                    # to match the ConstantOp behavior.
return classify_array_kwarg(data)
else:
return classify_array_input(data)
if hasattr(data, '__cuda_array_interface__') or isinstance(data, _tensors.TensorGPU):
return False, 'gpu', data
return False, 'cpu', data
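# Hedged illustration (added, not part of the original file): how `_Classification`
# treats two common inputs. `np` is numpy; `_example_classification` is a hypothetical
# helper that exists only to document the expected behaviour.
def _example_classification():
    import numpy as np
    single = _Classification(np.zeros((2, 2), dtype=np.float32), 'Input 0')
    # A lone array is classified as a single CPU sample, not a batch.
    assert (single.is_batch, single.device) == (False, 'cpu')
    batch = _Classification([np.zeros((2, 2), dtype=np.float32)] * 2, 'Input 0')
    # A list of arrays is converted to a TensorListCPU and treated as a batch.
    assert (batch.is_batch, batch.device) == (True, 'cpu')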
def _slice_tensorlist(data, size):
""" Constructs TensorList consisting of ``size`` first elements of ``data``. """
return type(data)(list(data)[:size], layout=data.layout())
def _arithm_op(name, *inputs):
""" Arithmetic operator function wrapper around ``eager.arithmetic_generic_op``. It is used
for implementation of eager operators that are injected to TensorLists and for eager math
operators.
"""
batch_size = _choose_batch_size(inputs)
inputs = [_Classification(input, f'Input {i}', arg_constant_len=batch_size).data
for i, input in enumerate(inputs)]
categories_idxs, inputs, integers, reals = _ops._group_inputs(
inputs, edge_type=(_tensors.TensorListCPU, _tensors.TensorListGPU))
input_desc = _ops._generate_input_desc(categories_idxs, integers, reals)
if any(isinstance(input, _tensors.TensorListGPU) for input in inputs):
device = 'gpu'
else:
device = 'cpu'
if device == "gpu":
inputs = list(input._as_gpu() if isinstance(
input, _tensors.TensorListCPU) else input for input in inputs)
init_args = {
'device': device,
'expression_desc': f'{name}({input_desc})',
'integer_constants': integers,
'real_constants': reals,
}
from nvidia.dali.experimental.eager import arithmetic_generic_op
return arithmetic_generic_op(*inputs, **init_args)
# Implementations of TensorList operators, they are used directly in the backend.
def _add(self, other):
return _arithm_op('add', self, other)
def _radd(self, other):
return _arithm_op('add', other, self)
def _sub(self, other):
return _arithm_op('sub', self, other)
def _rsub(self, other):
return _arithm_op('sub', other, self)
def _mul(self, other):
return _arithm_op('mul', self, other)
def _rmul(self, other):
return _arithm_op('mul', other, self)
def _pow(self, other):
return _arithm_op('pow', self, other)
def _rpow(self, other):
return _arithm_op('pow', other, self)
def _truediv(self, other):
return _arithm_op('fdiv', self, other)
def _rtruediv(self, other):
return _arithm_op('fdiv', other, self)
def _floordiv(self, other):
return _arithm_op('div', self, other)
def _rfloordiv(self, other):
return _arithm_op('div', other, self)
def _neg(self):
return _arithm_op('minus', self)
def _eq(self, other):
return _arithm_op('eq', self, other)
def _ne(self, other):
return _arithm_op('neq', self, other)
def _lt(self, other):
return _arithm_op('lt', self, other)
def _le(self, other):
return _arithm_op('leq', self, other)
def _gt(self, other):
return _arithm_op('gt', self, other)
def _ge(self, other):
return _arithm_op('geq', self, other)
def _and(self, other):
return _arithm_op('bitand', self, other)
def _rand(self, other):
return _arithm_op('bitand', other, self)
def _or(self, other):
return _arithm_op('bitor', self, other)
def _ror(self, other):
return _arithm_op('bitor', other, self)
def _xor(self, other):
return _arithm_op('bitxor', self, other)
def _rxor(self, other):
return _arithm_op('bitxor', other, self)
_stateless_operators_cache = {}
def _create_backend_op(spec, device, num_inputs, num_outputs, call_args_names, op_name):
inp_device = 'cpu' if device == 'mixed' else device
out_device = 'gpu' if device == 'mixed' else device
for i in range(num_inputs):
spec.AddInput(op_name + f'[{i}]', inp_device)
for i in range(num_outputs):
spec.AddOutput(op_name + f'_out[{i}]', out_device)
for arg_name in call_args_names:
spec.AddArgumentInput(arg_name, '')
if device == 'cpu':
backend_op = _b.EagerOperatorCPU(spec)
elif device == 'gpu':
backend_op = _b.EagerOperatorGPU(spec)
elif device == 'mixed':
backend_op = _b.EagerOperatorMixed(spec)
else:
raise ValueError(
f"Incorrect device type '{device}' in eager operator '{op_name}'.")
return backend_op
def _eager_op_object_factory(op_class, op_name):
    """ Creates an eager operator class to use with the object-oriented, ops-like API.
    Provided for completeness; currently not used.
    """
class EagerOperator(op_class):
def __init__(self, **kwargs):
            self._batch_size = kwargs.get('batch_size', -1)
# Workaround for batch size deduction in _prep_args as we don't have inputs yet.
kwargs['batch_size'] = 0
_, init_args, _ = _prep_args(
[], kwargs, op_name, op_name, _callable_op_factory.disqualified_arguments)
device_id = init_args.pop('device_id')
init_args.pop('max_batch_size')
super().__init__(**init_args)
self._spec.AddArg('device_id', device_id)
self.built = False
def __call__(self, *inputs, **kwargs):
inputs, init_args, call_args = _prep_args(
inputs, kwargs, op_name, op_name, _callable_op_factory.disqualified_arguments)
if not self.built:
num_outputs = self.schema.CalculateOutputs(
self._spec) + self.schema.CalculateAdditionalOutputs(self._spec)
self._spec.AddArg('max_batch_size', init_args['max_batch_size'])
self._backend_op = _create_backend_op(
self._spec, self._device, len(inputs), num_outputs, call_args.keys(), op_name)
self.built = True
output = self._backend_op(inputs, kwargs)
if len(output) == 1:
return output[0]
return output
return EagerOperator
def _expose_eager_op_as_object(op_class, submodule):
    """ Exposes eager operators as objects. Can be used if we decide to change the eager API
    from functional to object-oriented.
    """
op_name = op_class.schema_name
module = _internal.get_submodule('nvidia.dali.experimental.eager', submodule)
op = _eager_op_object_factory(op_class, op_name)
setattr(module, op_name, op)
def _eager_op_base_factory(op_class, op_name, num_inputs, call_args_names):
class EagerOperatorBase(op_class):
def __init__(self, *, max_batch_size, device_id, **kwargs):
super().__init__(**kwargs)
self._spec.AddArg('device_id', device_id)
self._spec.AddArg('max_batch_size', max_batch_size)
num_outputs = self.schema.CalculateOutputs(
self._spec) + self.schema.CalculateAdditionalOutputs(self._spec)
self._backend_op = _create_backend_op(
self._spec, self._device, num_inputs, num_outputs, call_args_names, op_name)
return EagerOperatorBase
def _create_module_class():
""" Creates a class imitating a module. Used for `rng_state` so we can have nested methods.
E.g. `rng_state.random.normal`.
"""
class Module:
@classmethod
def _submodule(cls, name):
""" Returns submodule, creates new if it does not exist. """
if name not in cls._submodules:
# Register a new submodule class (object representing submodule will be created in
# the rng_state's constructor).
cls._submodules[name] = _create_state_submodule(name)
return cls._submodules[name]
_submodules = {}
return Module
def _create_state_submodule(name):
""" Creates a class imitating a submodule. It can contain methods and nested submodules.
Used for submodules of rng_state, e.g. `rng_state.random`, `rng_state.noise`.
"""
class StateSubmodule(_create_module_class()):
def __init__(self, operator_cache, seed_generator):
self._operator_cache = operator_cache
self._seed_generator = seed_generator
for name, submodule_class in StateSubmodule._submodules.items():
# Adds nested submodules.
setattr(self, name, submodule_class(self._operator_cache, self._seed_generator))
__name__ = name
return StateSubmodule
def _callable_op_factory(op_class, op_name, num_inputs, call_args_names):
class EagerOperator(_eager_op_base_factory(op_class, op_name, num_inputs, call_args_names)):
def __call__(self, inputs, kwargs):
# Here all kwargs are supposed to be TensorLists.
output = self._backend_op(inputs, kwargs)
if len(output) == 1:
return output[0]
return output
return EagerOperator
_callable_op_factory.disqualified_arguments = {
'bytes_per_sample_hint',
'preserve',
'seed'
}
def _iterator_op_factory(op_class, op_name, num_inputs, call_args_names):
class EagerOperator(_eager_op_base_factory(op_class, op_name, num_inputs, call_args_names)):
def __init__(self, call_args, *, max_batch_size, **kwargs):
pad_last_batch = kwargs.get('pad_last_batch', False)
kwargs['pad_last_batch'] = True
super().__init__(max_batch_size=max_batch_size, **kwargs)
self._call_args = call_args
self._iter = 0
epoch_size = self._backend_op.reader_meta()['epoch_size']
self._num_iters = ((epoch_size + max_batch_size - 1) // max_batch_size)
# Size of the last batch in an epoch.
if pad_last_batch or epoch_size % max_batch_size == 0:
self._last_batch_size = max_batch_size
else:
self._last_batch_size = epoch_size % max_batch_size
assert isinstance(self._last_batch_size, int)
def __next__(self):
""" Iterates over dataset once per epoch (last batch may not be full). """
if self._iter == self._num_iters:
self._iter = 0
raise StopIteration
else:
self._iter += 1
outputs = self._backend_op([], self._call_args)
if self._iter == self._num_iters:
# Return potentially partial batch at the end of an epoch.
outputs = [_slice_tensorlist(tl_output, self._last_batch_size)
for tl_output in outputs]
if len(outputs) == 1:
outputs = outputs[0]
return outputs
def __iter__(self):
return self
def __len__(self):
return self._num_iters
return EagerOperator
_iterator_op_factory.disqualified_arguments = {
'bytes_per_sample_hint',
'preserve',
}
def _choose_device(op_name, wrapper_name, inputs, device_param):
"""Returns device type and device_id based on inputs and device_param."""
input_device = ''
if len(inputs) > 0:
if any(isinstance(input, _tensors.TensorListGPU) for input in inputs):
input_device = 'gpu:0'
else:
input_device = 'cpu'
if device_param is None:
# Select device type based on inputs.
device_param = input_device if input_device else 'cpu'
sep_pos = device_param.find(':')
# Separate device and device_id.
if sep_pos != -1:
device = device_param[:sep_pos]
device_id = int(device_param[sep_pos + 1:])
else:
device = device_param
device_id = 0
    # `input_device` is set to 'gpu:0' above, so match by prefix rather than equality.
    if device == 'cpu' and input_device.startswith('gpu'):
raise ValueError("An operator with device='cpu' cannot accept GPU inputs.")
if device != 'cpu' and device != 'gpu':
raise ValueError(f"Incorrect device type '{device}'.")
if input_device == 'cpu' and device == 'gpu':
if op_name in _ops._mixed_ops:
device = 'mixed'
else:
raise ValueError(f"Operator '{wrapper_name}' not registered for mixed.")
return device, device_id
def _disqualify_arguments(op_name, kwargs, disqualified_args):
for key in disqualified_args:
if key in kwargs:
raise RuntimeError(f"Argument '{key}' is not supported by eager operator '{op_name}'.")
def _choose_batch_size(inputs, batch_size=-1):
"""Returns batch size based on inputs and batch_size parameter."""
if len(inputs) > 0:
input_batch_size = -1
for input in inputs:
if hasattr(input, '__len__'):
input_batch_size = len(input)
if isinstance(input, (_tensors.TensorListCPU, _tensors.TensorListGPU)):
# TensorList inputs have priority for choosing batch size.
break
if batch_size == -1:
if input_batch_size == -1:
raise RuntimeError("Could not deduce 'batch_size' from inputs.")
batch_size = input_batch_size
if input_batch_size != batch_size:
raise ValueError(
f"Requested batch_size={batch_size}, but input 0 has batch_size={input_batch_size}")
if batch_size == -1:
raise RuntimeError(
"Operators with no inputs need to have 'batch_size' parameter specified.")
return batch_size
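# Hedged example (added comment): with a single TensorListCPU of 8 samples as the only
# input and `batch_size` left at -1, `_choose_batch_size` deduces 8; with no inputs and
# no explicit `batch_size`, it raises, since nothing can be deduced.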
def _prep_args(inputs, kwargs, op_name, wrapper_name, disqualified_arguments):
def _prep_inputs(inputs, batch_size):
inputs = list(inputs)
for i, input in enumerate(inputs):
if not isinstance(input, (_tensors.TensorListCPU, _tensors.TensorListGPU)):
inputs[i] = _transform_data_to_tensorlist(input, batch_size)
return inputs
def _prep_kwargs(kwargs, batch_size):
for key, value in kwargs.items():
kwargs[key] = _Classification(
value, f'Argument {key}', arg_constant_len=batch_size).data
return kwargs
_disqualify_arguments(wrapper_name, kwargs, disqualified_arguments)
# Preprocess kwargs to get batch_size.
batch_size = _choose_batch_size(inputs, kwargs.pop('batch_size', -1))
kwargs = _prep_kwargs(kwargs, batch_size)
init_args, call_args = _ops._separate_kwargs(kwargs, _tensors.TensorListCPU)
# Preprocess inputs, try to convert each input to TensorList.
inputs = _prep_inputs(inputs, batch_size)
init_args['max_batch_size'] = batch_size
init_args['device'], init_args['device_id'] = _choose_device(
op_name, wrapper_name, inputs, kwargs.get('device'))
return inputs, init_args, call_args
def _desc_call_args(inputs, args):
"""Returns string description of call arguments (inputs and input arguments) to use as part of
the caching key."""
return str([(inp.dtype, inp.layout(), len(inp[0].shape())) for inp in inputs]) + str(sorted(
[(key, value.dtype, value.layout(), len(value[0].shape())) for key, value in args.items()]))
def _gen_cache_key(op_name, inputs, init_args, call_args):
""" Creating cache key consisting of operator name, description of inputs, input arguments
and init args. Each call arg is described by dtype, layout and dim.
"""
return op_name + _desc_call_args(inputs, call_args) + str(sorted(init_args.items()))
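# Hedged illustration (added comment): the key concatenates the operator name, a
# description of every input and call argument (dtype, layout, dimensionality), and the
# sorted init args; the exact string format is an implementation detail - only its
# uniqueness per distinct operator configuration matters for the cache.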
def _wrap_stateless(op_class, op_name, wrapper_name):
"""Wraps stateless Eager Operator in a function. Callable the same way as functions in fn API,
but directly with TensorLists.
"""
def wrapper(*inputs, **kwargs):
inputs, init_args, call_args = _prep_args(
inputs, kwargs, op_name, wrapper_name, _callable_op_factory.disqualified_arguments)
key = _gen_cache_key(op_name, inputs, init_args, call_args)
if key not in _stateless_operators_cache:
_stateless_operators_cache[key] = _callable_op_factory(
op_class, wrapper_name, len(inputs), call_args.keys())(**init_args)
return _stateless_operators_cache[key](inputs, call_args)
return wrapper
def _wrap_stateful(op_class, op_name, wrapper_name):
"""Wraps stateful Eager Operator as method of a class. Callable the same way as functions in
fn API, but directly with TensorLists.
"""
def wrapper(self, *inputs, **kwargs):
inputs, init_args, call_args = _prep_args(
inputs, kwargs, op_name, wrapper_name, _callable_op_factory.disqualified_arguments)
key = _gen_cache_key(op_name, inputs, init_args, call_args)
if key not in self._operator_cache:
# Creating a new operator instance with deterministically generated seed, so if we
# preserve the order of operator calls in different instances of rng_state, they
# return the same results.
seed = self._seed_generator.integers(_wrap_stateful.seed_upper_bound)
self._operator_cache[key] = _callable_op_factory(
op_class, wrapper_name, len(inputs), call_args.keys())(**init_args, seed=seed)
return self._operator_cache[key](inputs, call_args)
return wrapper
_wrap_stateful.seed_upper_bound = (1 << 31) - 1
def _wrap_iterator(op_class, op_name, wrapper_name):
"""Wraps reader Eager Operator in a Python iterator.
Example:
>>> for file, label in eager.readers.file(file_root=file_path, batch_size=8):
... # file and label are batches of size 8 (TensorLists).
... print(file)
"""
def wrapper(*inputs, **kwargs):
if len(inputs) > 0:
raise ValueError("Iterator type eager operators should not receive any inputs.")
inputs, init_args, call_args = _prep_args(
inputs, kwargs, op_name, wrapper_name, _iterator_op_factory.disqualified_arguments)
op = _iterator_op_factory(op_class, wrapper_name, len(inputs),
call_args.keys())(call_args, **init_args)
return op
return wrapper
def _get_rng_state_target_module(submodules):
    """ Returns the target module of rng_state, creating it if it does not exist. """
from nvidia.dali.experimental import eager
last_module = eager.rng_state
for cur_module_name in submodules:
# If nonexistent registers rng_state's submodule.
cur_module = last_module._submodule(cur_module_name)
last_module = cur_module
return last_module
def _get_eager_target_module(parent_module, submodules, make_hidden):
""" Returns target module inside ``parent_module`` if specified, otherwise inside eager. """
if parent_module is None:
# Exposing to nvidia.dali.experimental.eager module.
parent_module = _internal.get_submodule('nvidia.dali', 'experimental.eager')
else:
# Exposing to experimental.eager submodule of the specified parent module.
parent_module = _internal.get_submodule(
sys.modules[parent_module], 'experimental.eager')
if make_hidden:
op_module = _internal.get_submodule(parent_module, submodules[:-1])
else:
op_module = _internal.get_submodule(parent_module, submodules)
return op_module
def _wrap_eager_op(op_class, submodules, parent_module, wrapper_name, wrapper_doc, make_hidden):
""" Exposes eager operator to the appropriate module
(similar to :func:`nvidia.dali.fn._wrap_op`).
Uses ``op_class`` for preprocessing inputs and keyword arguments and filling OpSpec for backend
eager operators.
Args:
op_class: Op class to wrap.
        submodules: Additional submodules (scope).
parent_module (str): If set to None, the wrapper is placed in nvidia.dali.experimental.eager
module, otherwise in a specified parent module.
wrapper_name: Wrapper name (the same as in fn API).
wrapper_doc (str): Documentation of the wrapper function.
        make_hidden (bool): If the operator is hidden, expose it outside of the hidden submodule.
"""
op_name = op_class.schema_name
op_schema = _b.TryGetSchema(op_name)
if op_schema.IsDeprecated() or op_name in _excluded_operators:
return
elif op_name in _stateful_operators:
wrapper = _wrap_stateful(op_class, op_name, wrapper_name)
op_module = _get_rng_state_target_module(submodules)
else:
if op_name in _iterator_operators:
wrapper = _wrap_iterator(op_class, op_name, wrapper_name)
else:
            # If the operator is not stateful, iterator, deprecated or excluded, expose it
            # as stateless.
wrapper = _wrap_stateless(op_class, op_name, wrapper_name)
op_module = _get_eager_target_module(parent_module, submodules, make_hidden)
if not hasattr(op_module, wrapper_name):
wrapper.__name__ = wrapper_name
wrapper.__qualname__ = wrapper_name
wrapper.__doc__ = wrapper_doc
wrapper._schema_name = op_name
if submodules:
wrapper.__module__ = op_module.__name__
setattr(op_module, wrapper_name, wrapper)
|
DALI-main
|
dali/python/nvidia/dali/_utils/eager_utils.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
def _is_marked_autoserializable(object):
return getattr(object, '_is_autoserialize', False)
def _discover_autoserialize(module, visited):
"""
Traverses a module tree given by the head module and
returns all functions that are marked with ``@autoserialize`` decorator.
:param module: Module currently searched.
:param visited: Paths to the ``__init__.py`` of the modules already searched.
:return: All functions that are marked with ``@autoserialize`` decorator.
"""
assert module is not None
ret = []
try:
module_members = inspect.getmembers(module)
except (ModuleNotFoundError, ImportError):
# If any module can't be inspected, DALI will not be able to find the @autoserialize
# anyway. We can just skip this module.
return ret
modules = []
for name, path in module_members:
obj = getattr(module, name, None)
if inspect.ismodule(obj) and path not in visited:
modules.append(name)
visited.append(path)
elif inspect.isfunction(obj) and _is_marked_autoserializable(obj):
ret.append(obj)
for mod in modules:
ret.extend(_discover_autoserialize(getattr(module, mod, None), visited=visited))
return ret
def invoke_autoserialize(head_module, filename):
"""
Perform the autoserialization of a function marked by
:meth:`nvidia.dali.plugin.triton.autoserialize`.
Assuming, that user marked a function with ``@autoserialize`` decorator, the
``invoke_autoserialize`` is a utility function, which will actually perform
the autoserialization.
It discovers the ``@autoserialize`` function in a module tree denoted by provided
``head_module`` and saves the serialized DALI pipeline to the file in the ``filename`` path.
Only one ``@autoserialize`` function may exist in a given module tree.
    :param head_module: Module denoting the module tree in which the decorated function shall exist.
:param filename: Path to the file, where the output of serialization will be saved.
"""
autoserialize_functions = _discover_autoserialize(head_module, visited=[])
if len(autoserialize_functions) > 1:
raise RuntimeError(
f"Precisely one autoserialize function must exist in the module. "
f"Found {len(autoserialize_functions)}: {autoserialize_functions}.")
if len(autoserialize_functions) < 1:
raise RuntimeError(
"Precisely one autoserialize function must exist in the module. Found none.")
dali_pipeline = autoserialize_functions[0]
dali_pipeline().serialize(filename=filename)
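# Hedged usage sketch (added for illustration): serializing a user module tree that
# contains exactly one @autoserialize-decorated pipeline. `my_model` is a hypothetical
# module name, not part of DALI.
#
#   import my_model
#   invoke_autoserialize(my_model, "/tmp/model.dali")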
|
DALI-main
|
dali/python/nvidia/dali/_utils/autoserialize.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
_not_iterable = ()
_original_check = None
class _NotIterable:
def __iter__(self):
        raise TypeError(f"Objects of type `{type(self)}` are not iterable.")
def _check_iterable(iterable, instance):
if isinstance(instance, _not_iterable):
return False
return _original_check(iterable, instance)
def _hook_iterable_check():
global _original_check
global _not_iterable
if _original_check is not None:
return
_original_check = type(collections.abc.Iterable).__instancecheck__
type(collections.abc.Iterable).__instancecheck__ = _check_iterable
if len(_not_iterable) == 0:
_not_iterable = (_NotIterable,)
def not_iterable(cls, add_iter=True):
    """Makes a class non-iterable by raising a TypeError in __iter__ and suppressing
    the detection of its instances as instances of collections.abc.Iterable.
    """
_hook_iterable_check()
if add_iter:
cls.__iter__ = _NotIterable.__iter__
global _not_iterable
s = set(_not_iterable)
s.add(cls)
_not_iterable = tuple(s)
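# Hedged usage sketch (added for illustration): `Opaque` and `_example_not_iterable` are
# hypothetical names used only to document the effect of `not_iterable`.
def _example_not_iterable():
    class Opaque:
        pass
    not_iterable(Opaque)
    # Instances now raise TypeError in iter() and are not reported as Iterable.
    assert not isinstance(Opaque(), collections.abc.Iterable)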
|
DALI-main
|
dali/python/nvidia/dali/_utils/hacks.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/python/nvidia/dali/experimental/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import math # noqa: F401
from nvidia.dali._utils.eager_utils import _create_module_class
""" Eager module implements eager versions of standard DALI operators.
There are 3 main types of eager operators:
stateless: callable directly as functions. Example:
>>> dali.experimental.eager.crop(input, crop=(5, 5))
stateful: operators that require a state, used as method of the `rng_state`. Example:
>>> rng_state = dali.experimental.eager.rng_state(seed=42)
        >>> rng_state.random.normal(shape=(10, 10), batch_size=8)
iterators: reader operators as python iterables. Example:
>>> for file, label in eager.readers.file(file_root=file_path, batch_size=8):
... # file and label are batches of size 8 (TensorLists).
... print(file)
Additionally eager implements:
math operators - `dali.experimental.eager.math`. Example:
>>> tl = dali.tensors.TensorListCPU(...)
>>> dali.experimental.eager.math.sqrt(tl)
direct arithmetic operators on TensorLists, enabled with `dali.experimental.eager.arithmetic`
    as a context-manager or as a function with a global setting.
"""
class _MetaArithmetic(type):
@property
def enabled(cls):
return cls._enabled
class arithmetic(metaclass=_MetaArithmetic):
    """ Context-manager that enables/disables arithmetic operators on TensorLists.
    Can also be used as a function that changes the global setting.
Examples:
>>> tl = dali.tensors.TensorListCPU(...)
>>> with dali.experimental.eager.arithmetic(enabled=True):
... out = tl + 1
>>> dali.experimental.eager.arithmetic(enabled=True)
>>> tl = dali.tensors.TensorListCPU(...)
>>> out = tl ** 2
"""
def __init__(self, enabled=True):
self.prev = arithmetic._enabled
arithmetic._enabled = enabled
@property
def enabled(self):
return type(self)._enabled
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
arithmetic._enabled = self.prev
__name__ = 'arithmetic'
_enabled = False
class rng_state(_create_module_class()):
""" Manager class for stateful operators. This object holds a cache of reusable operators.
Operators are initialized with deterministic seeds generated according to the ``seed`` argument
and are reused when you call the same operator with the same scalar parameters.
Example:
>>> eager_state = dali.experimental.eager.rng_state(seed=42)
>>> out1 = eager_state.random.normal(shape=[5, 5], batch_size=8)
>>> # Here we will reuse the same operator.
>>> out2 = eager_state.random.normal(shape=[5, 5], batch_size=8)
>>> # And now we will create a new operator with new seed.
>>> out3 = eager_state.random.normal(shape=[10, 10], batch_size=8)
"""
def __init__(self, seed=None):
import numpy as np
self._operator_cache = {}
self._seed_generator = np.random.default_rng(seed)
for name, submodule_class in rng_state._submodules.items():
# Create attributes imitating submodules, e.g. `random`, `noise`.
setattr(self, name, submodule_class(self._operator_cache, self._seed_generator))
__name__ = 'rng_state'
|
DALI-main
|
dali/python/nvidia/dali/experimental/eager/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali._utils.eager_utils import _arithm_op
# Eager versions of math operators. Implements the same operators as `nvidia.dali.math`,
# but works directly on TensorLists.
def sqrt(input):
return _arithm_op("sqrt", input)
def rsqrt(input):
return _arithm_op("rsqrt", input)
def cbrt(input):
return _arithm_op("cbrt", input)
def exp(input):
return _arithm_op("exp", input)
def log(input):
return _arithm_op("log", input)
def log2(input):
return _arithm_op("log2", input)
def log10(input):
return _arithm_op("log10", input)
def abs(input):
return _arithm_op("abs", input)
def fabs(input):
return _arithm_op("fabs", input)
def floor(input):
return _arithm_op("floor", input)
def ceil(input):
return _arithm_op("ceil", input)
def sin(input):
return _arithm_op("sin", input)
def cos(input):
return _arithm_op("cos", input)
def tan(input):
return _arithm_op("tan", input)
def asin(input):
return _arithm_op("asin", input)
def acos(input):
return _arithm_op("acos", input)
def atan(input):
return _arithm_op("atan", input)
def sinh(input):
return _arithm_op("sinh", input)
def cosh(input):
return _arithm_op("cosh", input)
def tanh(input):
return _arithm_op("tanh", input)
def asinh(input):
return _arithm_op("asinh", input)
def acosh(input):
return _arithm_op("acosh", input)
def atanh(input):
return _arithm_op("atanh", input)
def min(left, right):
return _arithm_op("min", left, right)
def max(left, right):
return _arithm_op("max", left, right)
def pow(base, exponent):
return _arithm_op("pow", base, exponent)
def fpow(base, exponent):
return _arithm_op("fpow", base, exponent)
def atan2(x, y):
return _arithm_op("atan2", x, y)
def clamp(value, lo, hi):
return _arithm_op("clamp", value, lo, hi)
|
DALI-main
|
dali/python/nvidia/dali/experimental/eager/math.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def autoserialize(dali_pipeline):
"""
Decorator, that marks a DALI pipeline (represented by :meth:`nvidia.dali.pipeline_def`) for
autoserialization in [DALI Backend's]
(https://github.com/triton-inference-server/dali_backend#dali-triton-backend) model repository.
For details about the autoserialization feature, please refer to the
[DALI Backend documentation]
(https://github.com/triton-inference-server/dali_backend#autoserialization).
Only a ``pipeline_def`` can be decorated with ``autoserialize``.
Only one ``pipeline_def`` may be decorated with ``autoserialize`` in a given program.
To perform autoserialization, please refer to :meth:`nvidia.dali._utils.invoke_autoserialize`.
For more information about Triton, please refer to
[Triton documentation]
(https://github.com/triton-inference-server/server#triton-inference-server).
:param dali_pipeline: DALI Python model definition (``pipeline_def``).
"""
if not getattr(dali_pipeline, "_is_pipeline_def", False):
raise TypeError("Only `@pipeline_def` can be decorated with `@triton.autoserialize`.")
dali_pipeline._is_autoserialize = True
return dali_pipeline
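# Hedged usage sketch (added for illustration): marking a pipeline_def for
# autoserialization. The pipeline body below is a placeholder, not a recommendation.
#
#   from nvidia.dali import pipeline_def, fn
#   from nvidia.dali.plugin.triton import autoserialize
#
#   @autoserialize
#   @pipeline_def(batch_size=8, num_threads=4, device_id=0)
#   def pipe():
#       return fn.random.coin_flip()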
|
DALI-main
|
dali/python/nvidia/dali/plugin/triton.py
|
# Copyright (c) 2017-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow as tf
from tensorflow.python.data.util import nest
from tensorflow.python.framework import tensor_shape
from nvidia.dali import types
from nvidia.dali import internal as _internal
from nvidia.dali.external_source import _is_external_source, _has_external_source
from nvidia.dali.external_source import _is_external_source_with_callback
from nvidia.dali._utils.external_source_impl import _get_generator_from_source_desc
from nvidia.dali._utils.external_source_impl import _cycle_enabled
from distutils.version import LooseVersion
import warnings
from nvidia.dali_tf_plugin import dali_tf_plugin
from collections.abc import Mapping, Iterable
_dali_tf_module = dali_tf_plugin.load_dali_tf_plugin()
_dali_tf = _dali_tf_module.dali
_dali_tf.__doc__ = _dali_tf.__doc__ + """
Please keep in mind that TensorFlow allocates almost all available device memory by default.
This might cause errors in DALI due to insufficient memory. For information on how to
change this behaviour, please refer to the TensorFlow documentation, as it may differ
based on your use case.
"""
_experimental_dataset_docstring = """Experimental variant of
:class:`~nvidia.dali.plugin.tf.DALIDataset`. This dataset adds support for input tf.data.Datasets.
Support for input tf.data.Datasets is available only for TensorFlow 2.4.1 and newer.
**Input dataset specification**
Each of the input datasets must be mapped to a :meth:`~nvidia.dali.fn.external_source` operator
that will represent the input to the DALI pipeline. In the pipeline the input is represented as
the ``name`` parameter of :meth:`~nvidia.dali.fn.external_source`. Input datasets must be provided
as a mapping from that ``name`` to the dataset object via the ``input_datasets`` dictionary
argument of DALIDatasetWithInputs.
**Per-sample and batch mode**
The input datasets can operate in per-sample mode or in batch mode.
In per-sample mode, the values produced by the source dataset are interpreted
as individual samples. The batch dimension is absent. For example, a 640x480 RGB image would
have a shape ``[480, 640, 3]``.
In batch mode, the tensors produced by the source dataset are interpreted as batches,
with an additional outer dimension denoting the samples in the batch. For example, a batch of
ten 640x480 RGB images would have a shape ``[10, 480, 640, 3]``.
In both cases (per-sample and batch mode), the layout of those inputs should be denoted as "HWC".
In per-sample mode, DALIDataset will query the input dataset ``batch_size`` times to build
a batch that is fed into the DALI Pipeline.
In per-sample mode, each sample produced by the input dataset can have a different shape,
but the number of dimensions and the layout must remain constant.
**External Source with** ``source`` **parameter**
This experimental DALIDataset accepts pipelines with :meth:`~nvidia.dali.fn.external_source`
nodes that have ``source`` parameter specified.
In that case, the ``source`` will be converted automatically into appropriate
``tf.data.Dataset.from_generator`` dataset with correct placement and
``tf.data.experimental.copy_to_device`` directives.
Those nodes can also work in per-sample or in batch mode. The data in batch mode must be
a dense, uniform tensor (each sample has the same dimensions). Only CPU data is accepted.
This allows TensorFlow DALIDataset to work with most Pipelines that have External Source
``source`` already specified.
.. warning::
This class is experimental and its API might change without notice.
.. note::
External source nodes with ``num_outputs`` specified to any number are not
supported - this means that callbacks with multiple (tuple) outputs are not supported.
.. note::
External source ``cycle`` policy ``'raise'`` is not supported - the dataset is not restartable.
.. note::
External source ``cuda_stream`` parameter is ignored - ``source`` is supposed to return
CPU data and tf.data.Dataset inputs are handled internally.
.. note::
External source ``use_copy_kernel`` and ``blocking`` parameters are ignored.
.. note::
Setting ``no_copy`` on the external source nodes when defining the pipeline is considered
a no-op when used with DALI Dataset. The ``no_copy`` option is handled internally
and enabled automatically if possible.
.. note::
Parallel execution of external source callback provided via ``source`` is not supported.
The callback is executed via TensorFlow ``tf.data.Dataset.from_generator`` - the ``parallel``
and ``prefetch_queue_depth`` parameters are ignored.
The operator adds additional parameters to the ones supported by the
:class:`~nvidia.dali.plugin.tf.DALIDataset`:
Parameters
----------
input_datasets : dict[str, tf.data.Dataset] or
dict[str, nvidia.dali.plugin.tf.experimental.Input]
input datasets to the DALI Pipeline. It must be provided as a dictionary mapping from
the names of the ``External Source`` nodes to the datasets objects or to the
:meth:`~nvidia.dali.plugin.tf.experimental.Input` wrapper.
For example::
{
'tensor_input': tf.data.Dataset.from_tensors(tensor).repeat(),
'generator_input': tf.data.Dataset.from_generator(some_generator)
}
can be passed as ``input_datasets`` for Pipeline like::
@pipeline_def
def external_source_pipe():
input_0 = fn.external_source(name='tensor_input')
input_1 = fn.external_source(name='generator_input')
return fn.resize(input_1, resize_x=input_0)
Entries that use ``tf.data.Dataset`` directly, like::
{
'input': tf.data.Dataset.from_tensors(tensor)
}
are equivalent to following specification using
``nvidia.dali.plugin.tf.experimental.Input``::
{
'input' : nvidia.dali.plugin.tf.experimental.Input(
dataset=tf.data.Dataset.from_tensors(tensor),
layout=None,
batch=False)
}
This means that inputs specified directly as ``tf.data.Dataset`` are considered
sample inputs.
.. warning::
Input dataset must be placed on the same device as ``DALIDatasetWithInputs``.
If the input has different placement (for instance, input is placed on CPU, while
``DALIDatasetWithInputs`` is placed on GPU) the ``tf.data.experimental.copy_to_device``
with GPU argument must be first applied to input.
"""
_experimental_input_docstring = """Wrapper for an input passed to DALIDataset.
Allows passing additional options that can override some of the ones specified
in the External Source node in the Python Pipeline object.
Passing None indicates that the value should be looked up in the pipeline definition.
Parameters
----------
dataset : tf.data.Dataset
The dataset used as an input
layout : str, optional, default = None
Layout of the input. If None, the layout will be taken from the corresponding
External Source node in the Python Pipeline object. If both are provided,
the layouts must be the same.
If neither is provided, empty layout will be used.
batch: bool, optional, default = False
Batch mode of a given input. If None, the batch mode will be taken from the
corresponding External Source node in the Python Pipeline object.
If the ``batch = False``, the input dataset is considered sample input.
If the ``batch = True``, the input dataset is expected to return batches.
"""
def serialize_pipeline(pipeline):
try:
return pipeline.serialize()
except RuntimeError as e:
raise RuntimeError("Error during pipeline initialization. Note that some operators "
"(e.g. Python Operators) cannot be used with "
"TensorFlow Dataset API and DALIIterator.") from e
def DALIIteratorWrapper(pipeline=None,
serialized_pipeline=None,
sparse=[],
shapes=[],
dtypes=[],
batch_size=-1,
prefetch_queue_depth=2,
**kwargs):
"""
TF Plugin Wrapper
This operator works in the same way as DALI TensorFlow plugin, with the exception that it also
accepts Pipeline objects as an input, which are serialized internally. For more information,
see :meth:`nvidia.dali.plugin.tf.DALIRawIterator`.
"""
if type(prefetch_queue_depth) is dict:
exec_separated = True
cpu_prefetch_queue_depth = prefetch_queue_depth["cpu_size"]
gpu_prefetch_queue_depth = prefetch_queue_depth["gpu_size"]
elif type(prefetch_queue_depth) is int:
exec_separated = False
        cpu_prefetch_queue_depth = -1  # dummy value: won't be used
gpu_prefetch_queue_depth = prefetch_queue_depth
if serialized_pipeline is None:
serialized_pipeline = serialize_pipeline(pipeline)
    # if batch_size is not provided, we need to extract it from the shapes arg
if (not isinstance(shapes, Iterable) or len(shapes) == 0) and batch_size == -1:
raise Exception(
'shapes and batch_size arguments cannot be empty, '
            'please provide at least one shape argument element with the BATCH size '
'or set batch_size'
)
if len(sparse) > 0 and sparse[0] and batch_size == -1:
if isinstance(shapes[0], Iterable) and len(shapes[0]) == 1:
shapes[0] = (shapes[0][0], 1)
else:
shapes[0] = (shapes[0], 1)
# shapes and dtypes need to take into account that sparse tensor will produce 3 output tensors
new_dtypes = []
new_shapes = []
for i in range(len(dtypes)):
if i < len(sparse) and sparse[i]:
# indices type of sparse tensor is tf.int64
new_dtypes.append(tf.int64)
new_dtypes.append(dtypes[i])
# dense shape type of sparse tensor is tf.int64
new_dtypes.append(tf.int64)
if len(shapes) > i and len(shapes[i]) > 0:
new_shapes.append((shapes[i][0], 1))
new_shapes.append((shapes[i][0]))
else:
new_shapes.append(())
new_shapes.append(())
new_shapes.append(())
else:
new_dtypes.append(dtypes[i])
if len(shapes) > i:
new_shapes.append(shapes[i])
    # gpu_prefetch_queue_depth corresponds to the global queue depth in the uniform case
out = _dali_tf(serialized_pipeline=serialized_pipeline,
shapes=new_shapes,
dtypes=new_dtypes,
sparse=sparse,
batch_size=batch_size,
exec_separated=exec_separated,
gpu_prefetch_queue_depth=gpu_prefetch_queue_depth,
cpu_prefetch_queue_depth=cpu_prefetch_queue_depth,
**kwargs)
new_out = []
j = 0
for i in range(len(dtypes)):
if i < len(sparse) and sparse[i]:
new_out.append(
tf.SparseTensor(indices=out[j], values=out[j + 1], dense_shape=out[j + 2]))
j += 3
else:
new_out.append(out[j])
j += 1
return new_out
def DALIIterator():
return DALIIteratorWrapper
# Vanilla raw operator legacy
def DALIRawIterator():
return _dali_tf
def _get_tf_version():
return LooseVersion(tf.__version__)
MIN_TENSORFLOW_VERSION = LooseVersion('1.15')
def dataset_compatible_tensorflow():
"""Returns ``True`` if current TensorFlow version is compatible with DALIDataset."""
return LooseVersion(tf.__version__) >= MIN_TENSORFLOW_VERSION
def dataset_inputs_compatible_tensorflow():
"""Returns ``True`` if the current TensorFlow version is compatible with
experimental.DALIDatasetWithInputs and input Datasets can be used with DALI.
"""
return LooseVersion(tf.__version__) >= LooseVersion('2.4.1')
def dataset_distributed_compatible_tensorflow():
"""Returns ``True`` if the tf.distribute APIs for current TensorFlow version are compatible
with DALIDataset.
"""
return LooseVersion(tf.__version__) >= LooseVersion('2.5.0')
def _get_experimental():
# TODO(klecki): this is WAR only for experimental module
current_module = sys.modules[__name__]
experimental = _internal.get_submodule(current_module, "experimental")
return experimental
def _insert_experimental_member(member, name):
experimental_module = _get_experimental()
    member.__module__ = experimental_module.__name__
setattr(experimental_module, name, member)
def _get_external_source_param(input_name, input_value, name_es_map, param_name):
"""Get value of the parameter `param_name` specified for the External Source node
named `input_name`. It can be specified either via `input_value` or in the op instance
passed in `name_es_map`.
    A non-`None` value in `input_value` overrides the one specified in the operator instance.
    Otherwise, the one from the pipeline definition (the op instance) is used.
Parameters
----------
input_name : str
Name of the input
input_value : Input, optional
Description of the input
name_es_map : dict[str, ExternalSource]
Mapping from the External Source names to operator nodes.
param_name : str
name of the parameter we want to access
"""
def get_param_from_pipe(input_name, name_es_map, param_name):
es_op = name_es_map[input_name]
# Check the OpInstance and the `_op`
try:
return getattr(es_op, "_" + param_name)
except AttributeError:
return getattr(es_op._op, "_" + param_name, None)
# We didn't get input through input_datasets
if input_value is None or getattr(input_value, param_name) is None:
return get_param_from_pipe(input_name, name_es_map, param_name)
else:
return getattr(input_value, param_name)
def _get_signature(dtype, shape):
# TODO(klecki): Find out how we can use ragged tensors for non-uniform batches
return tf.TensorSpec(shape=shape, dtype=dtype)
def _get_current_device_spec():
    """Best guess at checking the current device string in eager and graph mode.
    Using a callable in `with tf.device(...)` in graph mode will probably break it.
    The graph in use is assumed to be the current default graph.
"""
if tf.executing_eagerly():
# We are not using this `tf.device` with `with ...`,
# so we do not change the context, it returns _EagerDeviceContext
dummy_context_manager = tf.device(None)
# Get the eager.context singleton instance for this thread
context = dummy_context_manager._ctx
# DeviceSpec
return context.device_spec
else:
        # Get the default graph; we assume that it's the one in use
g = tf.compat.v1.get_default_graph()
# Get the top element of _UserDeviceSpec stack - `with tf.device()` pushes to the stack
# in graph mode.
spec = g._device_function_stack.peek_top_obj()
# Try to normalize to DeviceSpec
return tf.DeviceSpec.from_string(spec.display_name)
if dataset_compatible_tensorflow():
from tensorflow.python.framework import ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
import functools
def dataset_options():
options = tf.data.Options()
options.experimental_optimization.apply_default_optimizations = False
if hasattr(options.experimental_optimization, 'autotune'):
options.experimental_optimization.autotune = False
else:
options.autotune.enabled = False
return options
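    # Hedged usage sketch (added comment): these options are meant to be applied to the
    # dataset wrapping DALI so that TF graph optimizations and autotuning do not interfere,
    # e.g. `dali_dataset = dali_dataset.with_options(dataset_options())`.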
class _DALIDatasetV2(dataset_ops.DatasetV2):
def __init__(
self,
pipeline,
output_dtypes=None,
output_shapes=None,
fail_on_device_mismatch=True,
*,
input_datasets=None,
batch_size=1,
num_threads=4,
device_id=0,
exec_separated=False,
prefetch_queue_depth=2,
cpu_prefetch_queue_depth=2,
gpu_prefetch_queue_depth=2,
dtypes=None,
shapes=None):
output_shapes = self._handle_deprecation(output_shapes, shapes, "shapes")
output_dtypes = self._handle_deprecation(output_dtypes, dtypes, "dtypes")
if not self._check_dtypes(output_dtypes, tf.DType):
raise TypeError("`output_dtypes` should be provided as single tf.DType value "
f"or a tuple of tf.DType values. Got value `{output_dtypes}` "
f"of the type `{type(output_dtypes)}`.")
if output_shapes is None:
output_shapes = nest.map_structure(lambda _: tensor_shape.TensorShape(None),
output_dtypes)
else:
output_shapes = nest.map_structure_up_to(output_dtypes, tensor_shape.as_shape,
output_shapes)
if not isinstance(output_dtypes, tuple):
output_dtypes = (output_dtypes, )
output_shapes = (output_shapes, )
output_classes = nest.map_structure(lambda _: ops.Tensor, output_dtypes)
self._pipeline_instance = pipeline # keep the live Pipeline object
self._pipeline_serialized = serialize_pipeline(pipeline)
self._batch_size = batch_size
self._num_threads = num_threads
if device_id is None:
device_id = types.CPU_ONLY_DEVICE_ID
self._device_id = device_id
self._exec_separated = exec_separated
self._prefetch_queue_depth = prefetch_queue_depth
self._cpu_prefetch_queue_depth = cpu_prefetch_queue_depth
self._gpu_prefetch_queue_depth = gpu_prefetch_queue_depth
self._output_shapes = output_shapes
self._output_dtypes = output_dtypes
self._fail_on_device_mismatch = fail_on_device_mismatch
self._setup_inputs(input_datasets)
self._structure = structure.convert_legacy_structure(self._output_dtypes,
self._output_shapes,
output_classes)
super(_DALIDatasetV2, self).__init__(self._as_variant_tensor())
def _input_lists_from_input_datasets(self, input_datasets, name_es_map):
"""Extract the input specification from the input_datasets dictionary.
        Validate that the inputs exist in the pipeline and that the types are correct.
Returns
-------
list, list, list, list
input_datasets, input_names, input_layouts, input_batched
"""
if input_datasets is None:
return [], [], [], []
def _get_dataset(value):
if isinstance(value, dataset_ops.DatasetV2):
return value
else:
return value.dataset
in_datasets_list = []
in_names_list = []
in_layouts_list = []
in_batched_list = []
error_str = (
"`input_datasets` must be a dictionary that maps input names (the `name` "
"specified for External Source node in DALI pipeline) to input datasets "
"objects (`tf.data.Dataset`) or `nvidia.dali.plugin.tf.experimental.Input` wrapper "
"objects")
if not isinstance(input_datasets, Mapping):
raise TypeError(error_str + f", got: `{input_datasets}` of type: "
"{type(input_datasets)} instead.")
for input_name, input_value in input_datasets.items():
# keys are str
if not isinstance(input_name, str):
raise TypeError(error_str +
f". Expected the keys (representing the input names) to be of "
f"type `str`, got: `{input_name}` of type: "
f"{input_name} instead.")
# values are tf.data.Dataset or Input
is_dataset_only = isinstance(input_value, dataset_ops.DatasetV2)
experimental = _get_experimental()
if not is_dataset_only and not isinstance(input_value, experimental.Input):
raise TypeError(error_str +
". Expected the values of the dictionary (representing the "
"inputs) to be of type `tf.data.Dataset` or "
f"`nvidia.dali.plugin.tf.Input` got: `{input_value}` of type: "
f"{type(input_value)} instead.")
# there is External Source with name equal to `input_name`
if input_name not in name_es_map.keys():
raise ValueError("Did not find an External Source placeholder node with "
f"name='{input_name}' in the provided pipeline - required by "
"the name specified in the `input_datasets`. Names of "
"available placeholder External Source nodes are: "
f"{list(name_es_map.keys())}. "
"Placeholder nodes cannot have `source` argument specified.")
in_names_list.append(input_name)
in_datasets_list.append(_get_dataset(input_value))
if is_dataset_only:
# Set the defaults used in lookup
as_input = experimental.Input(input_value, layout=None, batch=False)
else:
as_input = input_value
# TODO(klecki): Do we want all Python-only ES parameters to be overridable here?
layout = _get_external_source_param(input_name, as_input, name_es_map, 'layout')
in_layouts_list.append(layout or "")
# Batched mode is supported by default
batched = _get_external_source_param(input_name, as_input, name_es_map, 'batch')
in_batched_list.append(batched if batched is not None else True)
return in_datasets_list, in_names_list, in_layouts_list, in_batched_list
def _input_lists_from_source(self, callbacked_es_map):
# TODO(klecki): Warn about this in the doc.
# We do it only when the user wants to use ExternalSource with `source` specified,
# as it has some additional limitations.
# Capture the device that DALI was placed on, as we may need to copy the CPU callbacks
# to that device.
dali_device_spec = _get_current_device_spec()
is_dali_on_gpu = dali_device_spec.device_type == "GPU"
in_datasets_list = []
in_names_list = []
in_layouts_list = []
in_batched_list = []
for input_name, external_source in callbacked_es_map.items():
in_names_list.append(input_name)
layout = _get_external_source_param(input_name, None, callbacked_es_map, 'layout')
in_layouts_list.append(layout or "")
# Batched mode is supported by default
batched = _get_external_source_param(input_name, None, callbacked_es_map, 'batch')
in_batched_list.append(batched if batched is not None else True)
source_desc = external_source._op._source_desc
if source_desc.cycle == 'raise':
raise NotImplementedError(f"External Source node: '{input_name}' got argument "
"cycle='raise' which is not supported.")
# All generator datasets must be placed on CPU.
with tf.device('/cpu:0'):
tf_gen, dtype, shape = _get_generator_from_source_desc(
source_desc, self._batch_size, external_source._batch)
signature = _get_signature(dtype, shape)
dataset = tf.data.Dataset.from_generator(tf_gen, output_signature=signature)
if _cycle_enabled(source_desc.cycle):
dataset = dataset.repeat()
# if DALIDataset was placed on GPU, we need to add the copy targeting
# that device (with proper id).
if is_dali_on_gpu:
dataset = dataset.apply(
tf.data.experimental.copy_to_device(dali_device_spec.to_string()))
in_datasets_list.append(dataset)
return in_datasets_list, in_names_list, in_layouts_list, in_batched_list
def _setup_inputs(self, input_datasets):
"""Verify the input specification and assign it to private members in
normalized form.
"""
has_es = _has_external_source(self._pipeline_instance)
# If no inputs are specified, input handling is no-op
if input_datasets is None and not has_es:
self._input_datasets = ()
self._input_names = ()
self._input_layouts = ()
self._input_batched = ()
return
self._assert_pipeline_instance()
# Normalize to an empty dict so we don't have to check for None everywhere
if input_datasets is None:
input_datasets = {}
name_es_map, callbacked_es_map = self._get_name_es_instance_map()
inputs_from_dict = self._input_lists_from_input_datasets(input_datasets, name_es_map)
inputs_from_source = self._input_lists_from_source(callbacked_es_map)
# Check if someone passed an entry in `input_datasets` for the ES with callback
if not input_datasets.keys().isdisjoint(callbacked_es_map.keys()):
overlapped = input_datasets.keys().intersection(callbacked_es_map.keys())
raise ValueError("Double specification of External Source input is not allowed. "
f"External Source nodes named: `{overlapped}` got inputs specified"
" via `input_datasets` DALIDataset argument and ExternalSource "
"`source` argument at the same time.")
# Verify that all inputs were covered
non_matched = (set(name_es_map.keys()) - set(input_datasets.keys()) -
set(callbacked_es_map.keys()))
if len(non_matched) != 0:
raise ValueError("Found External Source nodes in the Pipeline, that were not "
"assigned any inputs. Nodes without inputs: \n"
f"{list(non_matched)}.\nNodes that were assigned inputs:\n"
f"{list(input_datasets.keys())}.")
self._input_datasets = tuple(inputs_from_dict[0] + inputs_from_source[0])
self._input_names = tuple(inputs_from_dict[1] + inputs_from_source[1])
self._input_layouts = tuple(inputs_from_dict[2] + inputs_from_source[2])
# Map it to integers, to pass as vector<int> instead of vector<bool> to C++
self._input_batched = tuple(int(b) for b in inputs_from_dict[3] + inputs_from_source[3])
def _assert_pipeline_instance(self):
"""Ensure that the pipeline is built, and check if the Python part is available.
"""
self._pipeline_instance.build()
if not self._pipeline_instance._py_graph_built and self._pipeline_instance._built:
raise ValueError("Deserialized pipelines cannot be used with `input_datasets`. "
"Please provide a pipeline that was created directly in Python "
"and not recreated from serialized one.")
def _assert_correct_external_sources(self, external_source):
"""Validate that the external source nodes used are properly configured"""
if external_source._op._num_outputs is not None:
raise ValueError("Found placeholder External Source node (without `source` "
"argument) in the Pipeline that was created with `num_outputs` "
"`num_outputs` parameter. Only single-output "
"(with `num_outputs=None`), named (with `name` argument "
"specified) External Source nodes are supported as inputs "
"placeholders for DALIDataset integration. "
"Alternatively, External Source can be used with `source` "
"parameter specified.")
if external_source._op._name is None:
raise ValueError("Found placeholder External Source node (without `source` "
"argument) in the Pipeline that was not named "
"(no `name` argument set). Only single-output "
"(with `num_outputs=None`), named (with `name` argument "
"specified) External Source nodes are supported as inputs "
"placeholders for DALIDataset integration. "
"Alternatively, External Source can be used with `source` "
"parameter specified.")
def _get_name_es_instance_map(self):
"""Return mappings between name of External Source and the op.
Returns
-------
mapping for placeholder nodes, mapping for nodes with Python source
Two mappings are returned, separating the placeholder nodes without a `source`
and nodes that got a `source` parameter.
"""
name_es = {}
name_es_with_callback = {}
for op in self._pipeline_instance._ops:
if _is_external_source_with_callback(op):
# use the internal op name (generated automatically in most cases)
name_es_with_callback[op.name] = op
elif _is_external_source(op):
self._assert_correct_external_sources(op)
# use the user provided name
name_es[op._op._name] = op
return name_es, name_es_with_callback
def _check_dtypes(self, values, expected_elem_type):
"""Check whether `values` is instance of `expected_elem_type` or tuple of
`expected_elem_type`.
TF doesn't treat list as a nesting type, but as a Tensor.
"""
if isinstance(values, expected_elem_type):
return True
elif isinstance(values, tuple) \
and all(isinstance(elem, expected_elem_type) for elem in values):
return True
else:
return False
def _handle_deprecation(self, supported_arg, deprecated_arg, name):
if deprecated_arg is not None:
if supported_arg is not None:
raise ValueError((
"Usage of `{name}` is deprecated in favor of `output_{name}`. "
"Both arguments were provided, but only `output_{name}` should be provided."
).format(name=name))
# show only this warning
warnings.warn(("Use of argument `{name}` is deprecated. Please use `output_{name}`"
" instead. `output_{name}` should be provided as a tuple"
" or a single value.").format(name=name), Warning, stacklevel=2)
if isinstance(deprecated_arg, list):
return tuple(deprecated_arg)
return deprecated_arg
else:
return supported_arg
@property
def element_spec(self):
return self._structure
@property
def _element_structure(self):
return self._structure
def _inputs(self):
# Apparently here TF is happy with a list
return nest.flatten(self._input_datasets)
def _as_variant_tensor(self):
return _dali_tf_module.dali_dataset(
# Experimental dataset inputs
nest.map_structure(lambda d: d._variant_tensor, self._input_datasets),
# Description of inputs
input_names=self._input_names,
input_layouts=self._input_layouts,
input_batched=self._input_batched,
# End of experimental inputs
pipeline=self._pipeline_serialized,
batch_size=self._batch_size,
num_threads=self._num_threads,
device_id=self._device_id,
exec_separated=self._exec_separated,
prefetch_queue_depth=self._prefetch_queue_depth,
cpu_prefetch_queue_depth=self._cpu_prefetch_queue_depth,
gpu_prefetch_queue_depth=self._gpu_prefetch_queue_depth,
output_shapes=self._output_shapes,
output_dtypes=self._output_dtypes,
fail_on_device_mismatch=self._fail_on_device_mismatch)
if _get_tf_version() < LooseVersion('2.0'):
class _DALIDatasetImpl(dataset_ops.DatasetV1Adapter):
@functools.wraps(_DALIDatasetV2.__init__)
def __init__(self, pipeline, **kwargs):
self._wrapped = _DALIDatasetV2(pipeline, **kwargs)
super(_DALIDatasetImpl, self).__init__(self._wrapped)
else:
_DALIDatasetImpl = _DALIDatasetV2
_experimental_kwargs = ['input_datasets']
class DALIDataset(dataset_ops._OptionsDataset):
@functools.wraps(_DALIDatasetV2.__init__)
def __init__(self, pipeline, **kwargs):
# TODO(klecki): Remove this when we move support for inputs from experimental.
for disallowed_kwarg in _experimental_kwargs:
if disallowed_kwarg in kwargs.keys():
raise TypeError(
f"__init__() got an unexpected keyword argument '{disallowed_kwarg}'. "
"Dataset inputs are allowed only in"
" 'experimental.DALIDatasetWithInputs'.")
# We detected External Source nodes in the Pipeline
if _has_external_source(pipeline):
raise ValueError("DALIDataset got a DALI pipeline containing External Source "
"operator nodes. External Source nodes can be used to express "
"placeholders for tf.data.Dataset inputs to DALI or to run "
"user-provided Python code via `source` parameter. Support for "
"Dataset inputs and External Source's `source` is allowed only "
"in 'experimental.DALIDatasetWithInputs'.")
dataset_impl = _DALIDatasetImpl(pipeline, **kwargs)
super(DALIDataset, self).__init__(dataset_impl, dataset_options())
else:
class DALIDataset:
def __init__(self,
pipeline,
output_dtypes=None,
output_shapes=None,
fail_on_device_mismatch=True,
*,
batch_size=1,
num_threads=4,
device_id=0,
exec_separated=False,
prefetch_queue_depth=2,
cpu_prefetch_queue_depth=2,
gpu_prefetch_queue_depth=2,
dtypes=None,
shapes=None):
raise RuntimeError('DALIDataset is not supported for the detected version of '
'TensorFlow. DALIDataset supports versions: 1.15 and the 2.x family')
if dataset_inputs_compatible_tensorflow():
def _load_experimental_dataset():
class DALIDatasetWithInputs(dataset_ops._OptionsDataset):
@functools.wraps(_DALIDatasetV2.__init__)
def __init__(self, pipeline, **kwargs):
dataset_impl = _DALIDatasetImpl(pipeline, **kwargs)
super(DALIDatasetWithInputs, self).__init__(dataset_impl, dataset_options())
DALIDatasetWithInputs.__doc__ = _experimental_dataset_docstring
_insert_experimental_member(DALIDatasetWithInputs, "DALIDatasetWithInputs")
class Input:
def __init__(self, dataset, *, layout=None, batch=False):
if not isinstance(dataset, dataset_ops.DatasetV2):
raise TypeError(
("The inputs specified to DALIDataset must be instances of "
"type `tf.data.Dataset` got: `{}` of type: {} instead.").format(
dataset, type(dataset)))
self.dataset = dataset
self.layout = layout
self.batch = batch
Input.__doc__ = _experimental_input_docstring
_insert_experimental_member(Input, "Input")
_load_experimental_dataset()
else:
def _load_experimental_dataset():
class DALIDatasetWithInputs:
def __init__(self, *args, **kwargs):
raise RuntimeError('experimental.DALIDatasetWithInputs is not supported for '
'the detected version of TensorFlow. DALIDataset supports '
'versions: 2.4.1 and above.')
DALIDatasetWithInputs.__doc__ = _experimental_dataset_docstring
_insert_experimental_member(DALIDatasetWithInputs, "DALIDatasetWithInputs")
class Input:
def __init__(self, *args, **kwargs):
pass
Input.__doc__ = _experimental_input_docstring
_insert_experimental_member(Input, "Input")
_load_experimental_dataset()
DALIDataset.__doc__ = """Creates a ``DALIDataset`` compatible with
`tf.data.Dataset <https://www.tensorflow.org/api_docs/python/tf/data/Dataset>`_ from a DALI
pipeline. It supports TensorFlow 1.15 and the 2.x family.
``DALIDataset`` can be placed on CPU and GPU.
Please keep in mind that TensorFlow allocates almost all available device memory by default.
This might cause errors in DALI due to insufficient memory. For details on how to change this
behaviour, please refer to the TensorFlow documentation, as it may differ based on your use case.
.. warning::
Most TensorFlow Datasets have only a CPU variant. To process a GPU-placed ``DALIDataset``
with another TensorFlow dataset you need to first copy it back to CPU using an explicit
``tf.data.experimental.copy_to_device`` - a round trip from CPU to GPU and back to CPU
would probably degrade performance significantly and is thus discouraged.
Additionally, it is advised not to use transformations like ``repeat()`` or similar after
``DALIDataset``, as they may interfere with DALI memory allocations and prefetching.
Parameters
----------
pipeline : :class:`nvidia.dali.Pipeline`
defining the data processing to be performed.
output_dtypes: tf.DType or tuple of tf.DType, default = None
expected output types
output_shapes: tuple of shapes, optional, default = None
expected output shapes. If provided, must match the arity of the ``output_dtypes``.
When set to None, DALI will infer the shapes on its own.
Individual shapes can also be set to None or contain None to indicate unknown dimensions.
If specified, it must be compatible with the shape returned from the DALI Pipeline
and with the ``batch_size`` argument which will be the outermost dimension of returned tensors.
In case of ``batch_size = 1`` it can be omitted in the shape.
DALI Dataset will try to match the requested shape by squeezing 1-sized dimensions
from the shape obtained from the Pipeline.
fail_on_device_mismatch : bool, optional, default = True
When set to ``True``, a runtime check will be performed to ensure that the DALI device and
the TF device are both CPU or both GPU. In some contexts this check might be inaccurate.
When set to ``False``, the check is skipped, but additional logs are printed so the devices
can be verified. Keep in mind that this may allow hidden GPU-to-CPU copies in the workflow
and impact performance.
batch_size : int, optional, default = 1
batch size of the pipeline.
num_threads : int, optional, default = 4
number of CPU threads used by the pipeline.
device_id : int, optional, default = 0
id of GPU used by the pipeline.
A None value for this parameter means that DALI should not use GPU nor CUDA runtime.
This limits the pipeline to only CPU operators but allows it to run on any
CPU capable machine.
exec_separated : bool, optional, default = False
Whether to execute the pipeline in a way that enables
overlapping CPU and GPU computation, typically resulting
in faster execution speed, but larger memory consumption.
prefetch_queue_depth : int, optional, default = 2
depth of the executor queue. Deeper queue makes DALI more
resistant to uneven execution time of each batch, but it also
consumes more memory for internal buffers.
Value will be used with ``exec_separated`` set to ``False``.
cpu_prefetch_queue_depth : int, optional, default = 2
depth of the executor cpu queue. Deeper queue makes DALI more
resistant to uneven execution time of each batch, but it also
consumes more memory for internal buffers.
Value will be used with ``exec_separated`` set to ``True``.
gpu_prefetch_queue_depth : int, optional, default = 2
depth of the executor gpu queue. Deeper queue makes DALI more
resistant to uneven execution time of each batch, but it also
consumes more memory for internal buffers.
Value will be used with ``exec_separated`` set to ``True``.
Returns
-------
``DALIDataset`` object based on DALI pipeline and compatible with ``tf.data.Dataset`` API.
"""
DALIIterator.__doc__ = DALIIteratorWrapper.__doc__
DALIRawIterator.__doc__ = _dali_tf.__doc__
|
DALI-main
|
dali/python/nvidia/dali/plugin/tf.py
|
# Copyright (c) 2017-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.backend import TensorGPU, TensorListGPU
from nvidia.dali import types
from nvidia.dali.plugin.base_iterator import _DaliBaseIterator
from nvidia.dali.plugin.base_iterator import LastBatchPolicy
import mxnet as mx
import ctypes
import numpy as np
from collections.abc import Iterable
##################################################
##################################################
# Common utils
##################################################
##################################################
# MXNet currently does not expose WaitToWrite C API call
# in Python API
def _wait_to_write(arr):
if not isinstance(arr, mx.nd.NDArray):
raise RuntimeError("Can only wait for NDArray")
mx.base._LIB.MXNDArrayWaitToWrite(arr.handle)
def feed_ndarray(dali_tensor, arr, cuda_stream=None):
"""
Copy contents of DALI tensor to MXNet's NDArray.
Parameters
----------
`dali_tensor` : nvidia.dali.backend.TensorCPU or nvidia.dali.backend.TensorGPU
Tensor from which to copy
`arr` : mxnet.nd.NDArray
Destination of the copy
`cuda_stream` : cudaStream_t handle or any value that can be cast to cudaStream_t.
CUDA stream to be used for the copy
(if not provided, an internal user stream will be selected)
In most cases, using the default internal user stream or stream 0
is expected.
"""
dali_type = types.to_numpy_type(dali_tensor.dtype)
assert dali_type == arr.dtype, ("The element type of DALI Tensor/TensorList"
" doesn't match the element type of the target MXNet "
"NDArray: {} vs {}".
format(dali_type, np.dtype(arr.dtype)))
# Wait until arr is no longer used by the engine
_wait_to_write(arr)
assert dali_tensor.shape() == list(arr.shape), ("Shapes do not match: DALI tensor has "
"shape {0}, but NDArray has shape {1}".
format(dali_tensor.shape(), list(arr.shape)))
# Get CTypes void pointer to the underlying memory held by arr
ptr = ctypes.c_void_p()
mx.base._LIB.MXNDArrayGetData(arr.handle, ctypes.byref(ptr))
cuda_stream = types._raw_cuda_stream(cuda_stream)
# Copy data from DALI tensor to ptr
if isinstance(dali_tensor, (TensorGPU, TensorListGPU)):
stream = None if cuda_stream is None else ctypes.c_void_p(cuda_stream)
dali_tensor.copy_to_external(ptr, stream, non_blocking=True)
else:
dali_tensor.copy_to_external(ptr)
class _DALIMXNetIteratorBase(mx.io.DataIter, _DaliBaseIterator):
"""
Base class with methods shared by both DALIGenericIterator and DALIGluonIterator.
"""
def __init__(self,
pipelines,
size=-1,
reader_name=None,
fill_last_batch=None,
last_batch_padded=False,
auto_reset=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
_DaliBaseIterator.__init__(self,
pipelines,
size,
reader_name,
auto_reset,
fill_last_batch,
last_batch_padded,
last_batch_policy,
prepare_first_batch=prepare_first_batch)
def next(self):
"""
Returns the next batch of data.
"""
return self.__next__()
def reset(self):
"""
Resets the iterator after the full epoch.
DALI iterators do not support resetting before the end of the epoch
and will ignore such a request.
"""
_DaliBaseIterator.reset(self)
def __iter__(self):
_DaliBaseIterator.__iter__(self)
return self
def get_mx_array(shape, ctx=None, dtype=None):
# WAR
# ToDo (jlisiecki) - fix when upstream MXNet fixes this
# mx.nd.empty doesn't support np.longlong (mx.nd.zeros does), but it does support
# np.int64, which from our point of view is the same
if dtype == np.longlong:
dtype = np.int64
# mx.nd.empty doesn't handle scalars as shape
if not isinstance(shape, Iterable):
shape = [shape]
return mx.nd.empty(shape, ctx, dtype)
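# Illustrative sketch (not part of the original module): allocate a destination
# NDArray with `get_mx_array` and fill it from a DALI tensor via `feed_ndarray`.
# `pipe` is assumed to be a built DALI pipeline; the helper name
# `_example_copy_first_output` is hypothetical.
def _example_copy_first_output(pipe):
    out = pipe.run()[0].as_tensor()  # DALI TensorGPU (or TensorCPU)
    ctx = mx.gpu(pipe.device_id) if isinstance(out, TensorGPU) else mx.cpu(0)
    arr = get_mx_array(out.shape(), ctx, dtype=types.to_numpy_type(out.dtype))
    feed_ndarray(out, arr)  # copies the DALI tensor into MXNet-owned memory
    return arr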
###################################################
###################################################
# MXNet Sym API
###################################################
###################################################
class DALIGenericIterator(_DALIMXNetIteratorBase):
"""
General DALI iterator for MXNet. It can return any number of
outputs from the DALI pipeline in the form of MXNet's DataBatch
of NDArrays.
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
output_map : list of (str, str)
List of pairs (output_name, tag) which map consecutive
outputs of DALI pipelines to the proper fields in MXNet's
DataBatch.
tag is one of DALIGenericIterator.DATA_TAG
or DALIGenericIterator.LABEL_TAG, mapping the given output
to data or label correspondingly.
output_names should be distinct.
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one it is a sum)
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such case. It works with only one pipeline inside
the iterator.
Mutually exclusive with `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to properly count the number of relevant and padded
samples that the iterator needs to deal with. It automatically sets
`last_batch_padded` accordingly to match
the reader's configuration.
data_layout : str, optional, default = 'NCHW'
Either 'NHWC' or 'NCHW' - layout of the pipeline outputs.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to
be called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
squeeze_labels: (DEPRECATED) bool, optional, default = True
Whether the iterator should squeeze the labels before
copying them to the ndarray.
This argument is deprecated and will be removed from future releases.
dynamic_shape : any, optional,
Parameter used only for backward compatibility.
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`.
Both ``FILL`` and ``PARTIAL`` would return a full batch but the ``pad`` property
value of the returned array would differ.
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In conjunction with ``last_batch_policy`` it tells
whether the iterator returning the last batch, only partially filled with
data from the current epoch, drops the padding samples or the samples from
the next epoch (it doesn't literally drop them but sets the ``pad`` field of
the ndarray so the following code can use it to drop the data). If set to
``False`` the next epoch will end sooner, as data from it was consumed but
dropped. If set to ``True`` the next epoch would be the same length as the
first one. For this to happen, the option `pad_last_batch` in the reader
needs to be set to ``True`` as well.
It is overwritten when the `reader_name` argument is provided.
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True ->
last batch = ``[7, 7]`` and MXNet array property ``.pad=1``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False ->
last batch = ``[7, 1]`` and MXNet array property ``.pad=1``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True ->
last batch = ``[7, 7]`` and MXNet array property ``.pad=0``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False ->
last batch = ``[7, 1]`` and MXNet array property ``.pad=0``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True ->
last batch = ``[5, 6]``, next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False ->
last batch = ``[5, 6]``, next iteration will return ``[2, 3]``
"""
def __init__(self,
pipelines,
output_map,
size=-1,
reader_name=None,
data_layout='NCHW',
fill_last_batch=None,
auto_reset=False,
squeeze_labels=True,
dynamic_shape=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
# run the assertions first, as _DaliBaseIterator would run the prefetch
self._output_names_map = [x[0] for x in output_map]
self._output_categories_map = [x[1] for x in output_map]
self._output_categories = {DALIGenericIterator.DATA_TAG, DALIGenericIterator.LABEL_TAG}
assert set(self._output_categories_map) <= self._output_categories, \
"Only DATA_TAG and LABEL_TAG are allowed"
assert len(set(self._output_names_map)) == len(self._output_names_map), \
"output_names in output_map should be distinct"
self.output_map = output_map
super().__init__(pipelines,
size,
reader_name,
fill_last_batch,
last_batch_padded,
auto_reset,
last_batch_policy,
prepare_first_batch=prepare_first_batch)
self._squeeze_labels = squeeze_labels
self._first_batch = None
self._descriptors_populated = False
self._data_layout = data_layout
if self._prepare_first_batch:
try:
self._first_batch = DALIGenericIterator.__next__(self)
# call to `next` sets _ever_consumed to True but if we are just calling it from
# here we should set it back to False
self._ever_consumed = False
except StopIteration:
assert False, "It seems that there is no data in the pipeline. This may happen " \
"if `last_batch_policy` is set to PARTIAL and the requested batch size is " \
"greater than the shard size."
def __getattr__(self, key):
# these attributes are required by MXNet thus DALI needs to provide them
if key == 'provide_data' or key == 'provide_label':
# obtain the first batch to populate the metadata
try:
self._first_batch = DALIGenericIterator.__next__(self)
# these entries should be there thanks to the above call
return self.__dict__[key]
except StopIteration:
assert False, "It seems that there is no data in the pipeline. This may happen " \
"if `last_batch_policy` is set to PARTIAL and the requested batch size is " \
"greater than the shard size."
raise AttributeError
def _populate_descriptors(self, data_batch):
# populate metadata
if not self._descriptors_populated:
provide_data = []
provide_label = []
category_names = {key: [] for key in self._output_categories}
for name, category in self.output_map:
category_names[category].append(name)
for i, data in enumerate(data_batch[0].data):
data_shape = (data.shape[0] * self._num_gpus,) + data.shape[1:]
provide_data.append(mx.io.DataDesc(category_names[DALIGenericIterator.DATA_TAG][i],
data_shape, data.dtype,
layout=self._data_layout))
for i, label in enumerate(data_batch[0].label):
label_shape = (label.shape[0] * self._num_gpus,) + label.shape[1:]
provide_label.append(mx.io.DataDesc(
category_names[DALIGenericIterator.LABEL_TAG][i], label_shape, label.dtype))
self.__dict__['provide_data'] = provide_data
self.__dict__['provide_label'] = provide_label
self._descriptors_populated = True
def __next__(self):
self._ever_consumed = True
if self._first_batch is not None:
batch = self._first_batch
self._first_batch = None
return batch
# Gather outputs
outputs = self._get_outputs()
data_batches = [None for i in range(self._num_gpus)]
for i in range(self._num_gpus):
# MXNet wants batches with clear distinction between
# data and label entries, so segregate outputs into
# 2 categories
category_outputs = {key: [] for key in self._output_categories}
for j, out in enumerate(outputs[i]):
category_outputs[self._output_categories_map[j]].append(out)
# Change DALI TensorLists into Tensors
category_tensors = dict()
category_info = dict()
# For data proceed normally
category_tensors[DALIGenericIterator.DATA_TAG] = \
[x.as_tensor() for x in category_outputs[DALIGenericIterator.DATA_TAG]]
category_info[DALIGenericIterator.DATA_TAG] = \
[(x.shape(), types.to_numpy_type(x.dtype))
for x in category_tensors[DALIGenericIterator.DATA_TAG]]
# For labels we squeeze the tensors
category_tensors[DALIGenericIterator.LABEL_TAG] = \
[x.as_tensor() for x in category_outputs[DALIGenericIterator.LABEL_TAG]]
if self._squeeze_labels:
for label in category_tensors[DALIGenericIterator.LABEL_TAG]:
label.squeeze(-1) # Squeeze last dimension if necessary
category_info[DALIGenericIterator.LABEL_TAG] = \
[(x.shape(), types.to_numpy_type(x.dtype))
for x in category_tensors[DALIGenericIterator.LABEL_TAG]]
mx_gpu_device = mx.gpu(self._pipes[i].device_id)
mx_cpu_device = mx.cpu(0)
category_device = {key: [] for key in self._output_categories}
for category in self._output_categories:
for t in category_tensors[category]:
if type(t) is TensorGPU:
category_device[category].append(mx_gpu_device)
else:
category_device[category].append(mx_cpu_device)
d = []
labels = []
for j, (shape, dtype) in enumerate(category_info[DALIGenericIterator.DATA_TAG]):
d.append(get_mx_array(shape, category_device[DALIGenericIterator.DATA_TAG][j],
dtype=dtype))
for j, (shape, dtype) in enumerate(category_info[DALIGenericIterator.LABEL_TAG]):
labels.append(get_mx_array(shape,
category_device[DALIGenericIterator.LABEL_TAG][j],
dtype=dtype))
data_batches[i] = mx.io.DataBatch(data=d, label=labels)
d = data_batches[i].data
labels = data_batches[i].label
for j, d_arr in enumerate(d):
feed_ndarray(category_tensors[DALIGenericIterator.DATA_TAG][j], d_arr)
for j, l_arr in enumerate(labels):
feed_ndarray(category_tensors[DALIGenericIterator.LABEL_TAG][j], l_arr)
self._schedule_runs()
self._advance_and_check_drop_last()
if self._reader_name:
if_drop, left = self._remove_padded()
if np.any(if_drop):
left = [self.batch_size - labels for labels in left]
for i, to_pad in zip(range(self._num_gpus), left):
data_batches[i].pad = to_pad
else:
for batch in data_batches:
batch.pad = 0
else:
# padding the last batch
if self._last_batch_policy == LastBatchPolicy.PARTIAL and \
(self._counter > self._size) and \
self._size > 0:
# this is the last batch and we need to pad
overflow = self._counter - self._size
overflow_per_device = overflow // self._num_gpus
difference = self._num_gpus - (overflow % self._num_gpus)
for i in range(self._num_gpus):
if i < difference:
data_batches[i].pad = overflow_per_device
else:
data_batches[i].pad = overflow_per_device + 1
else:
for db in data_batches:
db.pad = 0
self._populate_descriptors(data_batches)
return data_batches
DATA_TAG = "data"
LABEL_TAG = "label"
class DALIClassificationIterator(DALIGenericIterator):
"""
DALI iterator for classification tasks for MXNet. It returns 2 outputs
(data and label) in the form of MXNet's DataBatch of NDArrays.
Calling
.. code-block:: python
DALIClassificationIterator(pipelines, reader_name, data_name, label_name, data_layout)
is equivalent to calling
.. code-block:: python
DALIGenericIterator(pipelines,
[(data_name, DALIClassificationIterator.DATA_TAG),
(label_name, DALIClassificationIterator.LABEL_TAG)],
reader_name,
data_layout)
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one pipeline, it is a sum).
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such a case. It works with only one pipeline inside
the iterator.
Mutually exclusive with `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to properly count the number of relevant and padded
samples that the iterator needs to deal with. It automatically sets
`last_batch_padded` accordingly to match the reader's configuration.
data_name : str, optional, default = 'data'
Data name for provided symbols.
label_name : str, optional, default = 'softmax_label'
Label name for provided symbols.
data_layout : str, optional, default = 'NCHW'
Either 'NHWC' or 'NCHW' - layout of the pipeline outputs.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
squeeze_labels: (DEPRECATED) bool, optional, default = True
Whether the iterator should squeeze the labels before
copying them to the ndarray.
This argument is deprecated and will be removed from future releases.
dynamic_shape : any, optional,
Parameter used only for backward compatibility.
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`.
Both ``FILL`` and ``PARTIAL`` would return a full batch but the ``pad`` property
value of the returned array would differ.
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In conjunction with ``last_batch_policy`` it tells
whether the iterator returning the last batch, only partially filled with
data from the current epoch, drops the padding samples or the samples from
the next epoch (it doesn't literally drop them but sets the ``pad`` field of
the ndarray so the following code can use it to drop the data). If set to
``False`` the next epoch will end sooner, as data from it was consumed but
dropped. If set to ``True`` the next epoch would be the same length as the
first one. For this to happen, the option `pad_last_batch` in the reader
needs to be set to ``True`` as well.
It is overwritten when the `reader_name` argument is provided.
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True ->
last batch = ``[7, 7]`` and MXNet array property ``.pad=1``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False ->
last batch = ``[7, 1]`` and MXNet array property ``.pad=1``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True ->
last batch = ``[7, 7]`` and MXNet array property ``.pad=0``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False ->
last batch = ``[7, 1]`` and MXNet array property ``.pad=0``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True ->
last batch = ``[5, 6]``, next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False ->
last batch = ``[5, 6]``, next iteration will return ``[2, 3]``
"""
def __init__(self,
pipelines,
size=-1,
reader_name=None,
data_name='data',
label_name='softmax_label',
data_layout='NCHW',
fill_last_batch=None,
auto_reset=False,
squeeze_labels=True,
dynamic_shape=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
super(DALIClassificationIterator, self).__init__(pipelines,
[(data_name,
DALIClassificationIterator.DATA_TAG),
(label_name,
DALIClassificationIterator.LABEL_TAG)],
size,
reader_name=reader_name,
data_layout=data_layout,
fill_last_batch=fill_last_batch,
auto_reset=auto_reset,
squeeze_labels=squeeze_labels,
dynamic_shape=dynamic_shape,
last_batch_padded=last_batch_padded,
last_batch_policy=last_batch_policy,
prepare_first_batch=prepare_first_batch)
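# Illustrative usage sketch (not part of the original module): as the docstring
# above shows, this is DALIGenericIterator with a fixed output map. `pipe` is a
# placeholder for a built pipeline with a reader named "Reader".
#
#     it = DALIClassificationIterator([pipe], reader_name="Reader")
#     for batches in it:
#         batch = batches[0]        # mx.io.DataBatch with .data and .label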
###############################################
###############################################
# Gluon API
###############################################
###############################################
class DALIGluonIterator(_DALIMXNetIteratorBase):
"""
General DALI iterator for MXNet with the Gluon API. It can return any number of
outputs from the DALI pipeline in the form of per-GPU tuples. These tuples consist of
NDArrays (for outputs marked as DALIGluonIterator.DENSE_TAG) and lists of NDArrays (for
outputs marked as DALIGluonIterator.SPARSE_TAG).
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one pipeline, it is a sum).
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such a case. It works with only one pipeline inside
the iterator.
Mutually exclusive with `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to properly count the number of relevant and padded
samples that the iterator needs to deal with. It automatically sets
`last_batch_padded` accordingly to match the reader's configuration.
output_types : list of str, optional, default = None
List of tags indicating whether the pipeline(s) output batch is
uniform (all the samples have the same size) or not. Batch outputs marked
as the former will be returned as a single NDArray; the latter
will be returned as a list of NDArrays.
Must be either DALIGluonIterator.DENSE_TAG
or DALIGluonIterator.SPARSE_TAG.
Length of output_types must match the number of outputs of the pipeline(s).
If not set, all outputs are considered to be marked with
DALIGluonIterator.DENSE_TAG.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In conjunction with ``last_batch_policy`` it tells
whether the iterator returning the last batch, only partially filled with
data from the current epoch, drops the padding samples or the samples from
the next epoch (it doesn't literally drop them but sets the ``pad`` field of
the ndarray so the following code can use it to drop the data). If set to
``False`` the next epoch will end sooner, as data from it was consumed but
dropped. If set to ``True`` the next epoch would be the same length as the
first one. For this to happen, the option `pad_last_batch` in the reader
needs to be set to ``True`` as well.
It is overwritten when the `reader_name` argument is provided.
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True ->
last batch = ``[7]``, next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False ->
last batch = ``[7]``, next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True ->
last batch = ``[7, 7]``, next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False ->
last batch = ``[7, 1]``, next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True ->
last batch = ``[5, 6]``, next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False ->
last batch = ``[5, 6]``, next iteration will return ``[2, 3]``
"""
def __init__(self,
pipelines,
size=-1,
reader_name=None,
output_types=None,
auto_reset=False,
fill_last_batch=None,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
# run the assertions first, as _DaliBaseIterator would run the prefetch
self._output_tags = {DALIGluonIterator.DENSE_TAG, DALIGluonIterator.SPARSE_TAG}
assert output_types is None or set(output_types) <= self._output_tags, \
"Only DENSE_TAG and SPARSE_TAG are allowed"
self._outputs_types = output_types
super(DALIGluonIterator, self).__init__(
pipelines,
size,
reader_name,
fill_last_batch,
last_batch_padded,
auto_reset,
last_batch_policy,
prepare_first_batch=prepare_first_batch)
self._first_batch = None
if self._prepare_first_batch:
try:
self._first_batch = DALIGluonIterator.__next__(self)
# call to `next` sets _ever_consumed to True but if we are just calling it from
# here we should set it back to False
self._ever_consumed = False
except StopIteration:
assert False, "It seems that there is no data in the pipeline. This may happen " \
"if `last_batch_policy` is set to PARTIAL and the requested batch size is " \
"greater than the shard size."
def __next__(self):
self._ever_consumed = True
if self._first_batch is not None:
batch = self._first_batch
self._first_batch = None
return batch
# Gather outputs
dali_outputs = self._get_outputs()
data_batches = [None for i in range(self._num_gpus)]
for i in range(self._num_gpus):
output_elements = []
shapes = []
for j, out in enumerate(dali_outputs[i]):
if self._outputs_types is None or \
self._outputs_types[j] == DALIGluonIterator.DENSE_TAG:
output_elements.append(out.as_tensor())
shapes.append(output_elements[-1].shape())
else:
output_elements.append([out[sample_idx]
for sample_idx in range(self.batch_size)])
s = [t.shape() for t in output_elements[-1]]
shapes.append(s)
data_batches[i] = self._create_data_batch(output_elements, shapes,
self._pipes[i].device_id)
batch = data_batches[i]
# Copy data from DALI Tensors to MXNet NDArrays
for j, output_el in enumerate(output_elements):
if self._outputs_types is None or \
self._outputs_types[j] == DALIGluonIterator.DENSE_TAG:
feed_ndarray(output_el, batch[j])
else:
for sample_idx in range(self.batch_size):
feed_ndarray(output_el[sample_idx], batch[j][sample_idx])
batches = [[([sample for sample in output_el] if isinstance(output_el, list) else
output_el)
for output_el in batch]
for batch in data_batches]
self._schedule_runs()
self._advance_and_check_drop_last()
if self._reader_name:
if_drop, left = self._remove_padded()
if np.any(if_drop):
output = []
for batch, to_copy in zip(batches, left):
batch = batch.copy()
for element_idx in range(len(batch)):
batch[element_idx] = batch[element_idx][0:to_copy]
output.append(batch)
return output
else:
if self._last_batch_policy == LastBatchPolicy.PARTIAL and \
(self._counter > self._size) and self._size > 0:
# First calculate how much data is required to return exactly self._size entries.
diff = self._num_gpus * self.batch_size - (self._counter - self._size)
# Figure out how many GPUs to grab from.
numGPUs_tograb = int(np.ceil(diff / self.batch_size))
# Figure out how many results to grab from the last GPU (as a fractional GPU
# batch may be required to bring us right up to self._size).
mod_diff = diff % self.batch_size
data_fromlastGPU = mod_diff if mod_diff else self.batch_size
# Grab the relevant data.
# 1) Grab everything from the relevant GPUs.
# 2) Grab the right data from the last GPU.
# 3) Append data together correctly and return.
output = batches[0:numGPUs_tograb]
output[-1] = output[-1].copy()
for element_idx in range(len(output[-1])):
output[-1][element_idx] = output[-1][element_idx][0:data_fromlastGPU]
return output
return batches
def _create_data_batch(self, output_elements, shapes, device_id):
mx_gpu_device = mx.gpu(device_id)
mx_cpu_device = mx.cpu(0)
new_batch = []
for j, output_el in enumerate(output_elements):
first_t = output_el if self._outputs_types is None or \
self._outputs_types[j] == DALIGluonIterator.DENSE_TAG else \
output_el[0]
dtype = types.to_numpy_type(first_t.dtype)
device = mx_gpu_device if type(first_t) is TensorGPU else mx_cpu_device
if self._outputs_types is None or self._outputs_types[j] == DALIGluonIterator.DENSE_TAG:
new_batch.append(get_mx_array(shapes[j], device, dtype=dtype))
else:
labels = []
for sample_idx in range(self.batch_size):
labels.append(get_mx_array(shapes[j][sample_idx], device, dtype=dtype))
new_batch.append(labels)
return new_batch
DENSE_TAG = "dense"
SPARSE_TAG = "sparse"
|
DALI-main
|
dali/python/nvidia/dali/plugin/mxnet.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import math
import numpy as np
import paddle
from distutils.version import LooseVersion
from nvidia.dali import types
from nvidia.dali.backend import TensorListCPU, TensorGPU, TensorListGPU
from nvidia.dali.plugin.base_iterator import _DaliBaseIterator
from nvidia.dali.plugin.base_iterator import LastBatchPolicy
assert LooseVersion(paddle.__version__) == LooseVersion('0.0.0') or \
LooseVersion(paddle.__version__) >= LooseVersion('2.0.0'), \
"DALI PaddlePaddle support requires Paddle develop or release >= 2.0.0"
dtype_map = {
types.DALIDataType.BOOL: paddle.framework.core.VarDesc.VarType.BOOL,
types.DALIDataType.FLOAT: paddle.framework.core.VarDesc.VarType.FP32,
types.DALIDataType.FLOAT64: paddle.framework.core.VarDesc.VarType.FP64,
types.DALIDataType.FLOAT16: paddle.framework.core.VarDesc.VarType.FP16,
types.DALIDataType.UINT8: paddle.framework.core.VarDesc.VarType.UINT8,
types.DALIDataType.INT8: paddle.framework.core.VarDesc.VarType.INT8,
types.DALIDataType.INT16: paddle.framework.core.VarDesc.VarType.INT16,
types.DALIDataType.INT32: paddle.framework.core.VarDesc.VarType.INT32,
types.DALIDataType.INT64: paddle.framework.core.VarDesc.VarType.INT64
}
def to_paddle_type(tensor):
r"""
Get paddle dtype for given tensor or tensor list
Args:
tensor: tensor or tensor list
Returns: paddle.framework.core.VarDesc.VarType
"""
return dtype_map[tensor.dtype]
def feed_ndarray(dali_tensor, ptr, cuda_stream=None):
"""
Copy contents of DALI tensor to Paddle's Tensor.
Parameters
----------
`dali_tensor` : dali.backend.TensorCPU or dali.backend.TensorGPU
Tensor from which to copy
`ptr` : LoDTensor data pointer
Destination of the copy
`cuda_stream` : cudaStream_t handle or any value that can be cast to cudaStream_t
CUDA stream to be used for the copy
(if not provided, an internal user stream will be selected)
"""
cuda_stream = types._raw_cuda_stream(cuda_stream)
c_type_pointer = ctypes.c_void_p(ptr)
if isinstance(dali_tensor, (TensorGPU, TensorListGPU)):
stream = None if cuda_stream is None else ctypes.c_void_p(cuda_stream)
dali_tensor.copy_to_external(c_type_pointer, stream, non_blocking=True)
else:
dali_tensor.copy_to_external(c_type_pointer)
return ptr
def recursive_length(tensor, lod_level):
def _recurse(data, result, level):
if level > 0:
if isinstance(data, (TensorListCPU, TensorListGPU)):
# handle tensor list
length = len(data)
result[0].append(length)
for i in range(length):
_recurse(data.at(i), result[1:], level - 1)
elif hasattr(data, 'shape'):
# handle dense GPU tensors and numpy.ndarray
shape = data.shape
if callable(shape):
shape = shape()
length = shape[0]
result[0].append(length)
for i in range(length):
_recurse(shape[1:], result[1:], level - 1)
else:
# handle shape
length = data[0]
result[0].append(length)
for i in range(length):
_recurse(data[1:], result[1:], level - 1)
seq_len = [[] for _ in range(lod_level)]
_recurse(tensor, seq_len, lod_level)
return seq_len
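# Illustrative worked example (not part of the original module): for a plain
# shape-like input the helper records one length per requested LoD level, so
#     recursive_length([2, 3], 1) == [[2]]
#     recursive_length([2, 3], 2) == [[2], [3, 3]]
# i.e. level 1 sees a batch of 2 sequences, and level 2 records the (here
# uniform) length 3 for each of them.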
def lod_tensor_clip(lod_tensor, size):
output = paddle.framework.core.LoDTensor()
ndarray = np.array(lod_tensor)
seq_len = lod_tensor.recursive_sequence_lengths()
if not seq_len:
output.set(ndarray[0:size], paddle.CPUPlace())
else:
last_len = size
out_seq_len = []
for lengths in seq_len:
lengths = lengths[0:last_len]
out_seq_len.append(lengths)
last_len = sum(lengths)
output.set(ndarray[0:sum(out_seq_len[-1])], paddle.CPUPlace())
output.set_recursive_sequence_lengths(out_seq_len)
return output
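# Illustrative worked example (not part of the original module): assume a
# LoDTensor with recursive sequence lengths [[2, 3]] (a batch of two
# sequences, of 2 and 3 rows). Clipping it with size=1 keeps only the first
# sequence: the output lengths become [[2]] and the output data keeps the
# first sum([2]) == 2 rows.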
class DALIGenericIterator(_DaliBaseIterator):
"""
General DALI iterator for Paddle. It can return any number of
outputs from the DALI pipeline in the form of Paddle's Tensors.
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
output_map : list of str or pair of type (str, int)
The strings map consecutive outputs of DALI pipelines to
user-specified names. Outputs will be returned from the iterator
as a dictionary keyed by those names. Each name should be distinct.
Item can also be a pair of (str, int), where the int value
specifies the LoD level of the resulting LoDTensor.
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one pipeline, it is a sum).
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such a case. It works with only one pipeline inside
the iterator.
Mutually exclusive with `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to properly count the number of relevant and padded
samples that the iterator needs to deal with. It automatically sets
`last_batch_padded` accordingly to match the reader's configuration.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
dynamic_shape : any, optional,
Parameter used only for backward compatibility.
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In conjunction with ``last_batch_policy`` it tells
whether the iterator returning the last batch, only partially filled with
data from the current epoch, drops the padding samples or the samples from
the next epoch. If set to ``False`` the next epoch will end sooner, as data
from it was consumed but dropped. If set to ``True`` the next epoch would be
the same length as the first one. For this to happen, the option
`pad_last_batch` in the reader needs to be set to ``True`` as well.
It is overwritten when the `reader_name` argument is provided.
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True -> last batch = ``[7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False -> last batch = ``[7]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True -> last batch = ``[7, 7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False -> last batch = ``[7, 1]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True -> last batch = ``[5, 6]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False -> last batch = ``[5, 6]``,
next iteration will return ``[2, 3]``
"""
def __init__(self,
pipelines,
output_map,
size=-1,
reader_name=None,
auto_reset=False,
fill_last_batch=None,
dynamic_shape=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
normalized_map = {}
for v in output_map:
if isinstance(v, str):
normalized_map[v] = 0
else:
normalized_map[v[0]] = v[1]
self.normalized_map = normalized_map
# check the assert first as _DaliBaseIterator would run the prefetch
output_map = [v if isinstance(v, str) else v[0] for v in output_map]
assert len(set(output_map)) == len(output_map), \
"output_map names should be distinct"
self.output_map = output_map
_DaliBaseIterator.__init__(self,
pipelines,
size,
reader_name,
auto_reset,
fill_last_batch,
last_batch_padded,
last_batch_policy,
prepare_first_batch=prepare_first_batch)
self._counter = 0
self._first_batch = None
if self._prepare_first_batch:
try:
self._first_batch = DALIGenericIterator.__next__(self)
# the call to `next` sets _ever_consumed to True, but since we are just calling it
# from here we should set it back to False
self._ever_consumed = False
except StopIteration:
assert False, "It seems that there is no data in the pipeline. This may happen " \
"if `last_batch_policy` is set to PARTIAL and the requested batch size is " \
"greater than the shard size."
def __next__(self):
self._ever_consumed = True
if self._first_batch is not None:
batch = self._first_batch
self._first_batch = None
return batch
# Gather outputs
outputs = self._get_outputs()
data_batches = [None for i in range(self._num_gpus)]
for i in range(self._num_gpus):
dev_id = self._pipes[i].device_id
# Initialize dict for all output categories
category_outputs = dict()
# Segregate outputs into categories
for j, out in enumerate(outputs[i]):
category_outputs[self.output_map[j]] = out
pd_gpu_place = paddle.CUDAPlace(dev_id)
pd_cpu_place = paddle.CPUPlace()
category_pd_type = dict()
category_place = dict()
category_tensors = dict()
category_shapes = dict()
category_lengths = dict()
for cat, out in category_outputs.items():
lod = self.normalized_map[cat]
assert out.is_dense_tensor() or lod > 0, \
"non-dense tensor lists must have LoD > 0"
if lod > 0:
# +1 for batch dim
seq_len = recursive_length(out, lod + 1)[1:]
shape = out.at(0).shape
if callable(shape):
shape = shape()
shape = [sum(seq_len[-1])] + list(shape[lod:])
category_shapes[cat] = shape
category_lengths[cat] = seq_len
else:
out = out.as_tensor()
category_shapes[cat] = out.shape()
category_lengths[cat] = []
category_tensors[cat] = out
category_pd_type[cat] = to_paddle_type(out)
if isinstance(out, (TensorGPU, TensorListGPU)):
category_place[cat] = pd_gpu_place
else:
category_place[cat] = pd_cpu_place
pd_tensors = {}
for cat, tensor in category_tensors.items():
lod_tensor = paddle.framework.core.LoDTensor()
pd_tensors[cat] = lod_tensor
lod_tensor._set_dims(category_shapes[cat])
seq_len = category_lengths[cat]
lod_tensor.set_recursive_sequence_lengths(seq_len)
lod_tensor._mutable_data(category_place[cat],
category_pd_type[cat])
data_batches[i] = pd_tensors
stream = paddle.device.cuda.current_stream(dev_id).cuda_stream
for cat, tensor in category_tensors.items():
ptr = pd_tensors[cat]._mutable_data(category_place[cat],
category_pd_type[cat])
feed_ndarray(tensor, ptr, stream)
self._schedule_runs()
self._advance_and_check_drop_last()
if self._reader_name:
if_drop, left = self._remove_padded()
if np.any(if_drop):
output = []
for batch, to_copy in zip(data_batches, left):
batch = batch.copy()
for cat in self.output_map:
batch[cat] = lod_tensor_clip(batch[cat], to_copy)
output.append(batch)
return output
else:
if self._last_batch_policy == LastBatchPolicy.PARTIAL and \
(self._counter > self._size) and self._size > 0:
# First calculate how much data is required to
# return exactly self._size entries.
diff = self._num_gpus * self.batch_size - (self._counter - self._size)
# Figure out how many GPUs to grab from.
num_gpus_to_grab = int(math.ceil(diff / self.batch_size))
# Figure out how many results to grab from the last GPU
# (as a fractional GPU batch may be required to bring us
# right up to self._size).
mod_diff = diff % self.batch_size
data_from_last_gpu = mod_diff if mod_diff else self.batch_size
# Grab the relevant data.
# 1) Grab everything from the relevant GPUs.
# 2) Grab the right data from the last GPU.
# 3) Append data together correctly and return.
output = data_batches[0:num_gpus_to_grab]
output[-1] = output[-1].copy()
for cat in self.output_map:
lod_tensor = output[-1][cat]
output[-1][cat] = lod_tensor_clip(
lod_tensor, data_from_last_gpu)
return output
return data_batches
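# Usage sketch (illustrative, not part of the original file). The pipeline factory
# and reader name below are hypothetical; output_map mixes a dense output with a
# variable-length output returned as a LoDTensor with LoD level 1:
#
#     pipe = my_pipeline(batch_size=16, num_threads=4, device_id=0)
#     it = DALIGenericIterator(pipe, output_map=["data", ("boxes", 1)],
#                              reader_name="Reader")
#     for batch in it:
#         data = batch[0]["data"]    # dense LoDTensor
#         boxes = batch[0]["boxes"]  # LoDTensor with recursive sequence lengths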
class DALIClassificationIterator(DALIGenericIterator):
"""
DALI iterator for classification tasks for Paddle. It returns 2 outputs
(data and label) in the form of LoDTensor.
Calling
.. code-block:: python
DALIClassificationIterator(pipelines, reader_name)
is equivalent to calling
.. code-block:: python
DALIGenericIterator(pipelines, ["data", "label"], reader_name)
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one pipeline, it is the sum of their shard sizes).
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such a case. It works with only one pipeline inside
the iterator.
Mutually exclusive with the `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to properly count the number of relevant and padded
samples that the iterator needs to deal with. It automatically sets
`last_batch_padded` accordingly to match the reader's configuration.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
dynamic_shape : any, optional,
Parameter used only for backward compatibility.
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In conjunction with ``last_batch_policy`` it tells
whether, when the iterator returns a last batch only partially filled with
data from the current epoch, the remaining samples are padding samples or
samples from the next epoch. If set to ``False``, the next
epoch will end sooner, as some of its data was already consumed but dropped. If set to
``True``, the next epoch will have the same length as the first one. For this to happen,
the option `pad_last_batch` in the reader needs to be set to True as well.
It is overwritten when the `reader_name` argument is provided
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True -> last batch = ``[7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False -> last batch = ``[7]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True -> last batch = ``[7, 7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False -> last batch = ``[7, 1]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True -> last batch = ``[5, 6]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False -> last batch = ``[5, 6]``,
next iteration will return ``[2, 3]``
"""
def __init__(self,
pipelines,
size=-1,
reader_name=None,
auto_reset=False,
fill_last_batch=None,
dynamic_shape=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
super(DALIClassificationIterator, self).__init__(
pipelines, ["data", "label"], size, reader_name=reader_name,
auto_reset=auto_reset,
fill_last_batch=fill_last_batch,
dynamic_shape=dynamic_shape,
last_batch_padded=last_batch_padded,
last_batch_policy=last_batch_policy,
prepare_first_batch=prepare_first_batch)
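# Usage sketch (illustrative, not part of the original file): the classification
# iterator is the generic one with a fixed output_map, so with a hypothetical
# pipeline the two calls below are equivalent:
#
#     it = DALIClassificationIterator(pipe, reader_name="Reader")
#     it = DALIGenericIterator(pipe, ["data", "label"], reader_name="Reader")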
|
DALI-main
|
dali/python/nvidia/dali/plugin/paddle.py
|
# Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/python/nvidia/dali/plugin/__init__.py
|
# Copyright (c) 2017-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.backend import TensorGPU, TensorListGPU
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
from nvidia.dali import types
from nvidia.dali.plugin.base_iterator import _DaliBaseIterator
from nvidia.dali.plugin.base_iterator import LastBatchPolicy
import torch
import torch.utils.dlpack as torch_dlpack
import ctypes
import numpy as np
to_torch_type = {
types.DALIDataType.FLOAT: torch.float32,
types.DALIDataType.FLOAT64: torch.float64,
types.DALIDataType.FLOAT16: torch.float16,
types.DALIDataType.UINT8: torch.uint8,
types.DALIDataType.INT8: torch.int8,
types.DALIDataType.BOOL: torch.bool,
types.DALIDataType.INT16: torch.int16,
types.DALIDataType.INT32: torch.int32,
types.DALIDataType.INT64: torch.int64
}
def feed_ndarray(dali_tensor, arr, cuda_stream=None):
"""
Copy contents of DALI tensor to PyTorch's Tensor.
Parameters
----------
`dali_tensor` : nvidia.dali.backend.TensorCPU or nvidia.dali.backend.TensorGPU
Tensor from which to copy
`arr` : torch.Tensor
Destination of the copy
`cuda_stream` : torch.cuda.Stream, cudaStream_t or any value that can be cast to cudaStream_t.
CUDA stream to be used for the copy
(if not provided, an internal user stream will be selected)
In most cases, using PyTorch's current stream is expected (for example,
if we are copying to a tensor allocated with torch.zeros(...))
"""
dali_type = to_torch_type[dali_tensor.dtype]
assert dali_type == arr.dtype, ("The element type of DALI Tensor/TensorList"
" doesn't match the element type of the target PyTorch Tensor: "
"{} vs {}".format(dali_type, arr.dtype))
assert dali_tensor.shape() == list(arr.size()), \
("Shapes do not match: DALI tensor has size {0}, but PyTorch Tensor has size {1}".
format(dali_tensor.shape(), list(arr.size())))
cuda_stream = types._raw_cuda_stream(cuda_stream)
# turn raw int to a c void pointer
c_type_pointer = ctypes.c_void_p(arr.data_ptr())
if isinstance(dali_tensor, (TensorGPU, TensorListGPU)):
stream = None if cuda_stream is None else ctypes.c_void_p(cuda_stream)
dali_tensor.copy_to_external(c_type_pointer, stream, non_blocking=True)
else:
dali_tensor.copy_to_external(c_type_pointer)
return arr
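# Usage sketch (illustrative, not part of the original file): copying a single DALI
# GPU tensor (here a hypothetical `dali_tensor`) into a freshly allocated torch
# tensor on the current CUDA stream:
#
#     dst = torch.empty(dali_tensor.shape(),
#                       dtype=to_torch_type[dali_tensor.dtype],
#                       device=torch.device('cuda', 0))
#     stream = torch.cuda.current_stream(device=dst.device)
#     feed_ndarray(dali_tensor, dst, cuda_stream=stream)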
class DALIGenericIterator(_DaliBaseIterator):
"""
General DALI iterator for PyTorch. It can return any number of
outputs from the DALI pipeline in the form of PyTorch's Tensors.
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
output_map : list of str
List of strings which map consecutive outputs
of DALI pipelines to user-specified names.
Outputs will be returned from the iterator as a dictionary
of those names.
Each name should be distinct
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one pipeline, it is the sum of their shard sizes).
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such a case. It works with only one pipeline inside
the iterator.
Mutually exclusive with the `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to properly count the number of relevant and padded
samples that the iterator needs to deal with. It automatically sets
`last_batch_padded` accordingly to match the reader's configuration.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
dynamic_shape : any, optional,
Parameter used only for backward compatibility.
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In conjunction with ``last_batch_policy`` it tells
whether, when the iterator returns a last batch only partially filled with
data from the current epoch, the remaining samples are padding samples or
samples from the next epoch. If set to ``False``, the next
epoch will end sooner, as some of its data was already consumed but dropped. If set to
``True``, the next epoch will have the same length as the first one. For this to happen,
the option `pad_last_batch` in the reader needs to be set to True as well.
It is overwritten when the `reader_name` argument is provided
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True -> last batch = ``[7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False -> last batch = ``[7]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True -> last batch = ``[7, 7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False -> last batch = ``[7, 1]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True -> last batch = ``[5, 6]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False -> last batch = ``[5, 6]``,
next iteration will return ``[2, 3]``
"""
def __init__(self,
pipelines,
output_map,
size=-1,
reader_name=None,
auto_reset=False,
fill_last_batch=None,
dynamic_shape=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
# check the assert first as _DaliBaseIterator would run the prefetch
assert len(set(output_map)) == len(output_map), "output_map names should be distinct"
self._output_categories = set(output_map)
self.output_map = output_map
_DaliBaseIterator.__init__(self,
pipelines,
size,
reader_name,
auto_reset,
fill_last_batch,
last_batch_padded,
last_batch_policy,
prepare_first_batch=prepare_first_batch)
self._first_batch = None
if self._prepare_first_batch:
try:
self._first_batch = DALIGenericIterator.__next__(self)
# the call to `next` sets _ever_consumed to True, but since we are just calling it
# from here we should set it back to False
self._ever_consumed = False
except StopIteration:
assert False, "It seems that there is no data in the pipeline. This may happen " \
"if `last_batch_policy` is set to PARTIAL and the requested batch size is " \
"greater than the shard size."
def __next__(self):
self._ever_consumed = True
if self._first_batch is not None:
batch = self._first_batch
self._first_batch = None
return batch
# Gather outputs
outputs = self._get_outputs()
data_batches = [None for i in range(self._num_gpus)]
for i in range(self._num_gpus):
dev_id = self._pipes[i].device_id
# initialize dict for all output categories
category_outputs = dict()
# segregate outputs into categories
for j, out in enumerate(outputs[i]):
category_outputs[self.output_map[j]] = out
# Change DALI TensorLists into Tensors
category_tensors = dict()
category_shapes = dict()
for category, out in category_outputs.items():
category_tensors[category] = out.as_tensor()
category_shapes[category] = category_tensors[category].shape()
category_torch_type = dict()
category_device = dict()
torch_gpu_device = None
torch_cpu_device = torch.device('cpu')
# check category and device
for category in self._output_categories:
category_torch_type[category] = to_torch_type[category_tensors[category].dtype]
if type(category_tensors[category]) is TensorGPU:
if not torch_gpu_device:
torch_gpu_device = torch.device('cuda', dev_id)
category_device[category] = torch_gpu_device
else:
category_device[category] = torch_cpu_device
pyt_tensors = dict()
for category in self._output_categories:
pyt_tensors[category] = torch.empty(category_shapes[category],
dtype=category_torch_type[category],
device=category_device[category])
data_batches[i] = pyt_tensors
# Copy data from DALI Tensors to torch tensors
for category, tensor in category_tensors.items():
if isinstance(tensor, (TensorGPU, TensorListGPU)):
# Use the same cuda_stream as the one used to allocate pyt_tensors[category]
stream = torch.cuda.current_stream(device=pyt_tensors[category].device)
feed_ndarray(tensor, pyt_tensors[category], cuda_stream=stream)
else:
feed_ndarray(tensor, pyt_tensors[category])
self._schedule_runs()
self._advance_and_check_drop_last()
if self._reader_name:
if_drop, left = self._remove_padded()
if np.any(if_drop):
output = []
for batch, to_copy in zip(data_batches, left):
batch = batch.copy()
for category in self._output_categories:
batch[category] = batch[category][0:to_copy]
output.append(batch)
return output
else:
if self._last_batch_policy == LastBatchPolicy.PARTIAL and (
self._counter > self._size) and self._size > 0:
# First calculate how much data is required to return exactly self._size entries.
diff = self._num_gpus * self.batch_size - (self._counter - self._size)
# Figure out how many GPUs to grab from.
numGPUs_tograb = int(np.ceil(diff / self.batch_size))
# Figure out how many results to grab from the last GPU
# (as a fractional GPU batch may be required to bring us
# right up to self._size).
mod_diff = diff % self.batch_size
data_fromlastGPU = mod_diff if mod_diff else self.batch_size
# Grab the relevant data.
# 1) Grab everything from the relevant GPUs.
# 2) Grab the right data from the last GPU.
# 3) Append data together correctly and return.
output = data_batches[0:numGPUs_tograb]
output[-1] = output[-1].copy()
for category in self._output_categories:
output[-1][category] = output[-1][category][0:data_fromlastGPU]
return output
return data_batches
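# Usage sketch (illustrative, not part of the original file). The pipeline factory
# and reader name are hypothetical; each element of the returned list corresponds
# to one pipeline (one GPU):
#
#     it = DALIGenericIterator([my_pipeline(device_id=0)], ["data", "label"],
#                              reader_name="Reader",
#                              last_batch_policy=LastBatchPolicy.PARTIAL)
#     for batch in it:
#         x = batch[0]["data"]   # torch.Tensor placed on cuda:0
#         y = batch[0]["label"]  # torch.Tensor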
class DALIClassificationIterator(DALIGenericIterator):
"""
DALI iterator for classification tasks for PyTorch. It returns 2 outputs
(data and label) in the form of PyTorch's Tensor.
Calling
.. code-block:: python
DALIClassificationIterator(pipelines, reader_name)
is equivalent to calling
.. code-block:: python
DALIGenericIterator(pipelines, ["data", "label"], reader_name)
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one pipeline, it is the sum of their shard sizes).
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such a case. It works with only one pipeline inside
the iterator.
Mutually exclusive with the `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to properly count the number of relevant and padded
samples that the iterator needs to deal with. It automatically sets
`last_batch_padded` accordingly to match the reader's configuration.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
dynamic_shape : any, optional,
Parameter used only for backward compatibility.
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In conjunction with ``last_batch_policy`` it tells
whether, when the iterator returns a last batch only partially filled with
data from the current epoch, the remaining samples are padding samples or
samples from the next epoch. If set to ``False``, the next
epoch will end sooner, as some of its data was already consumed but dropped. If set to
``True``, the next epoch will have the same length as the first one. For this to happen,
the option `pad_last_batch` in the reader needs to be set to True as well.
It is overwritten when the `reader_name` argument is provided
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True -> last batch = ``[7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False -> last batch = ``[7]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True -> last batch = ``[7, 7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False -> last batch = ``[7, 1]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True -> last batch = ``[5, 6]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False -> last batch = ``[5, 6]``,
next iteration will return ``[2, 3]``
"""
def __init__(self,
pipelines,
size=-1,
reader_name=None,
auto_reset=False,
fill_last_batch=None,
dynamic_shape=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
super(DALIClassificationIterator, self).__init__(pipelines, ["data", "label"],
size,
reader_name=reader_name,
auto_reset=auto_reset,
fill_last_batch=fill_last_batch,
dynamic_shape=dynamic_shape,
last_batch_padded=last_batch_padded,
last_batch_policy=last_batch_policy,
prepare_first_batch=prepare_first_batch)
class DALIRaggedIterator(_DaliBaseIterator):
"""
General DALI iterator for PyTorch with ragged tensors.
It can return any number of outputs from the DALI pipeline
in the form of per-GPU dictionaries.
These dictionaries consist of PyTorch Tensors
(for outputs marked as DALIRaggedIterator.DENSE_TAG),
sparse COO PyTorch Tensors
(for outputs marked as DALIRaggedIterator.SPARSE_COO_TAG)
and lists of PyTorch Tensors
(for outputs marked as DALIRaggedIterator.SPARSE_LIST_TAG).
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one pipeline, it is the sum of their shard sizes).
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such a case. It works with only one pipeline inside
the iterator.
Mutually exclusive with the `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to properly count the number of relevant and padded
samples that the iterator needs to deal with. It automatically sets `last_batch_policy`
to PARTIAL when FILL is used, and `last_batch_padded` accordingly to match
the reader's configuration
output_types : list of str, optional, default = None
List of tags indicating whether each pipeline output batch is
uniform (all the samples have the same size) or not. Batch outputs marked
as the former will be returned as single PyTorch Tensors, the latter
will be returned in the specified sparse PyTorch Tensor format.
Each tag must be either DALIRaggedIterator.DENSE_TAG,
DALIRaggedIterator.SPARSE_LIST_TAG
or DALIRaggedIterator.SPARSE_COO_TAG.
The length of output_types must match the number of outputs of the pipeline(s).
If not set, all outputs are considered to be marked with
DALIRaggedIterator.DENSE_TAG.
For now, sparse mode supports only lists of tensors and the COO sparse tensor format.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In conjunction with ``last_batch_policy`` it tells
whether, when the iterator returns a last batch only partially filled with
data from the current epoch, the remaining samples are padding samples or
samples from the next epoch (it doesn't literally drop them but sets the ``pad``
field of the ndarray so the following code can use it to drop the data). If set
to ``False``, the next epoch will end sooner, as some of its data was already
consumed but dropped. If set to ``True``, the next epoch will have the same
length as the first one. For this to happen, the option `pad_last_batch` in the
reader needs to be set to True as well.
It is overwritten when the `reader_name` argument is provided
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True ->
last batch = ``[7]``, next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False ->
last batch = ``[7]``, next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True ->
last batch = ``[7, 7]``, next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False ->
last batch = ``[7, 1]``, next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True ->
last batch = ``[5, 6]``, next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False ->
last batch = ``[5, 6]``, next iteration will return ``[2, 3]``
"""
def __init__(self,
pipelines,
output_map,
size=-1,
reader_name=None,
output_types=None,
auto_reset=False,
fill_last_batch=None,
dynamic_shape=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
# check the assert first as _DaliBaseIterator would run the prefetch
self._output_tags = {
DALIRaggedIterator.DENSE_TAG,
DALIRaggedIterator.SPARSE_LIST_TAG,
DALIRaggedIterator.SPARSE_COO_TAG
}
assert len(set(output_map)) == len(output_map), "output_map names should be distinct"
assert output_types is None or set(output_types) <= self._output_tags, \
"Only DENSE_TAG, SPARSE_LIST_TAG and SPARSE_COO_TAG are allowed"
self.output_map = output_map
self._outputs_types = output_types
super(DALIRaggedIterator, self).__init__(
pipelines,
size,
reader_name,
auto_reset,
fill_last_batch,
last_batch_padded,
last_batch_policy,
prepare_first_batch
)
self._first_batch = None
if self._prepare_first_batch:
try:
self._first_batch = DALIRaggedIterator.__next__(self)
# the call to `next` sets _ever_consumed to True, but since we are just calling it
# from here we should set it back to False
self._ever_consumed = False
except StopIteration:
assert False, "It seems that there is no data in the pipeline. This may happen " \
"if `last_batch_policy` is set to PARTIAL and the requested batch size is " \
"greater than the shard size."
def __next__(self):
self._ever_consumed = True
if self._first_batch is not None:
batch = self._first_batch
self._first_batch = None
return batch
# Gather outputs
dali_outputs = self._get_outputs()
data_batches = [None for i in range(self._num_gpus)]
for i in range(self._num_gpus):
dev_id = self._pipes[i].device_id
# initialize dict for all output categories
category_outputs = dict()
# segregate outputs into categories
for j, out in enumerate(dali_outputs[i]):
category_outputs[self.output_map[j]] = out
# Change DALI TensorLists into Tensors
category_tensors = dict()
category_shapes = dict()
category_torch_type = dict()
category_device = dict()
torch_gpu_device = None
torch_cpu_device = torch.device('cpu')
for j, (category, out) in enumerate(category_outputs.items()):
if self._outputs_types is None or \
self._outputs_types[j] == DALIRaggedIterator.DENSE_TAG:
category_tensors[category] = out.as_tensor()
category_shapes[category] = category_tensors[category].shape()
else:
category_tensors[category] = [x for x in out]
category_shapes[category] = [x.shape() for x in out]
# check dtype
category_torch_type[category] = to_torch_type[out.dtype]
# check device
if type(out) is TensorListGPU:
if not torch_gpu_device:
torch_gpu_device = torch.device('cuda', dev_id)
category_device[category] = torch_gpu_device
else:
category_device[category] = torch_cpu_device
pyt_tensors = dict()
for j, category in enumerate(self.output_map):
if self._outputs_types is None or \
self._outputs_types[j] == DALIRaggedIterator.DENSE_TAG:
pyt_tensors[category] = torch.empty(category_shapes[category],
dtype=category_torch_type[category],
device=category_device[category])
else:
pyt_tensors[category] = [
torch.empty(shape,
dtype=category_torch_type[category],
device=category_device[category])
for shape in category_shapes[category]
]
data_batches[i] = pyt_tensors
# Copy data from DALI Tensors to torch tensors
for j, (category, tensor) in enumerate(category_tensors.items()):
if self._outputs_types is None or \
self._outputs_types[j] == DALIRaggedIterator.DENSE_TAG:
if isinstance(tensor, (TensorGPU, TensorListGPU)):
# Use the same cuda_stream as the one used to allocate pyt_tensors[category]
stream = torch.cuda.current_stream(device=pyt_tensors[category].device)
feed_ndarray(tensor, pyt_tensors[category], cuda_stream=stream)
else:
feed_ndarray(tensor, pyt_tensors[category])
else:
for k, single_tensor in enumerate(tensor):
if isinstance(single_tensor, (TensorGPU, TensorListGPU)):
# Use the same cuda_stream as the one used to allocate the target tensor
stream = torch.cuda.current_stream(
device=pyt_tensors[category][k].device
)
feed_ndarray(
single_tensor, pyt_tensors[category][k], cuda_stream=stream
)
else:
feed_ndarray(single_tensor, pyt_tensors[category][k])
if self._outputs_types[j] == DALIRaggedIterator.SPARSE_COO_TAG:
values = torch.hstack(pyt_tensors[category])
indices = [
[(i, j) for j in range(shape[0])]
for i, shape in enumerate(category_shapes[category])
]
indices = [indice for el_indices in indices for indice in el_indices]
indices = torch.tensor(indices, dtype=torch.int64, device=values.device)
pyt_tensors[category] = torch.sparse_coo_tensor(indices.T, values)
self._schedule_runs()
self._advance_and_check_drop_last()
if self._reader_name:
if_drop, left = self._remove_padded()
if np.any(if_drop):
output = []
for batch, to_copy in zip(data_batches, left):
batch = batch.copy()
for category in self.output_map:
batch[category] = batch[category][0:to_copy]
output.append(batch)
return output
else:
if self._last_batch_policy == LastBatchPolicy.PARTIAL and (
self._counter > self._size) and self._size > 0:
# First calculate how much data is required to return exactly self._size entries.
diff = self._num_gpus * self.batch_size - (self._counter - self._size)
# Figure out how many GPUs to grab from.
numGPUs_tograb = int(np.ceil(diff / self.batch_size))
# Figure out how many results to grab from the last GPU
# (as a fractional GPU batch may be required to bring us
# right up to self._size).
mod_diff = diff % self.batch_size
data_fromlastGPU = mod_diff if mod_diff else self.batch_size
# Grab the relevant data.
# 1) Grab everything from the relevant GPUs.
# 2) Grab the right data from the last GPU.
# 3) Append data together correctly and return.
output = data_batches[0:numGPUs_tograb]
output[-1] = output[-1].copy()
for category in self.output_map:
output[-1][category] = output[-1][category][0:data_fromlastGPU]
return output
return data_batches
DENSE_TAG = "dense"
SPARSE_LIST_TAG = "sparse_list"
SPARSE_COO_TAG = "sparse_coo"
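# Usage sketch (illustrative, not part of the original file): mixing a dense output
# with a ragged one returned as a list of per-sample tensors. The pipeline factory
# and reader name are hypothetical:
#
#     it = DALIRaggedIterator([my_pipeline(device_id=0)],
#                             output_map=["images", "boxes"],
#                             output_types=[DALIRaggedIterator.DENSE_TAG,
#                                           DALIRaggedIterator.SPARSE_LIST_TAG],
#                             reader_name="Reader")
#     for batch in it:
#         images = batch[0]["images"]  # a single torch.Tensor
#         boxes = batch[0]["boxes"]    # a list of per-sample torch.Tensors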
class TorchPythonFunction(ops.PythonFunctionBase):
schema_name = "TorchPythonFunction"
ops.register_cpu_op('TorchPythonFunction')
ops.register_gpu_op('TorchPythonFunction')
def _torch_stream_wrapper(self, function, *ins):
with torch.cuda.stream(self.stream):
out = function(*ins)
self.stream.synchronize()
return out
def torch_wrapper(self, batch_processing, function, device, *args):
func = function if device == 'cpu' else \
lambda *ins: self._torch_stream_wrapper(function, *ins)
if batch_processing:
return ops.PythonFunction.function_wrapper_batch(func,
self.num_outputs,
torch.utils.dlpack.from_dlpack,
torch.utils.dlpack.to_dlpack,
*args)
else:
return ops.PythonFunction.function_wrapper_per_sample(func,
self.num_outputs,
torch_dlpack.from_dlpack,
torch_dlpack.to_dlpack,
*args)
def __call__(self, *inputs, **kwargs):
pipeline = Pipeline.current()
if pipeline is None:
Pipeline._raise_no_current_pipeline("TorchPythonFunction")
if self.stream is None:
self.stream = torch.cuda.Stream(device=pipeline.device_id)
return super(TorchPythonFunction, self).__call__(*inputs, **kwargs)
def __init__(self, function, num_outputs=1, device='cpu', batch_processing=False, **kwargs):
self.stream = None
super(TorchPythonFunction, self).__init__(impl_name="DLTensorPythonFunctionImpl",
function=lambda *ins:
self.torch_wrapper(batch_processing,
function, device,
*ins),
num_outputs=num_outputs, device=device,
batch_processing=batch_processing, **kwargs)
ops._wrap_op(TorchPythonFunction, "fn", __name__)
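# Usage sketch (illustrative, not part of the original file): after ops._wrap_op the
# operator is also exposed in functional form in this module's `fn` namespace, so
# inside a hypothetical pipeline definition one could write:
#
#     from nvidia.dali.plugin.pytorch import fn as torch_fn
#
#     # inside a @pipeline_def function, `images` being a GPU data node:
#     doubled = torch_fn.torch_python_function(images,
#                                              function=lambda t: t * 2,
#                                              num_outputs=1, device='gpu')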
|
DALI-main
|
dali/python/nvidia/dali/plugin/pytorch.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import types
import math
import logging
import numpy as np
import warnings
from enum import Enum, unique
from collections.abc import Iterable
def _iterator_deprecation_warning():
warnings.warn("Please set `reader_name` and don't set last_batch_padded and size manually " +
"whenever possible. This may lead, in some situations, to missing some " +
"samples or returning duplicated ones. Check the Sharding section of the "
"documentation for more details.",
Warning, stacklevel=2)
@unique
class LastBatchPolicy(Enum):
"""
Describes the last batch policy behavior when there are not enough samples in the epoch
to fill a whole batch.
* FILL - The last batch is filled by either repeating the last sample or by wrapping
up the data set. The precise behavior depends on the reader's ``pad_last_batch`` argument
* DROP - The last batch is dropped if it cannot be fully filled with data from the current
epoch
* PARTIAL - The last batch is partially filled with the remaining data from the current
epoch, keeping the rest of the samples empty
"""
FILL = 0
DROP = 1
PARTIAL = 2
class _DaliBaseIterator(object):
"""
DALI base iterator class. Shouldn't be used directly.
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
output_map : list of (str, str)
List of pairs (output_name, tag) which map consecutive
outputs of DALI pipelines to the proper field in MXNet's
DataBatch.
tag is one of DALIGenericIterator.DATA_TAG
and DALIGenericIterator.LABEL_TAG, mapping the given output
to data or label correspondingly.
output_names should be distinct.
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than one
pipeline, it is the sum of their shard sizes). Providing -1 means that the iterator
will work until StopIteration is raised from the inside of iter_setup(). The options
`last_batch_policy`, `last_batch_padded` and `auto_reset` don't work in such a case.
It works with only one pipeline inside the iterator.
Mutually exclusive with the `reader_name` argument
reader_name : str, default = None
Name of the reader which will be queried for the shard size, number of shards, and
all other properties necessary to properly count the number of relevant and padded
samples that the iterator needs to deal with. Sets `last_batch_padded`
according to the reader's configuration (`pad_last_batch` reader argument)
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised and
reset() needs to be called. Calling ``iter()`` on the iterator would reset
it as well.
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically
fill_last_batch : bool, optional, default = None
**Deprecated** Please use ``last_batch_policy`` instead
Whether to fill the last batch with data up to 'self.batch_size'.
The iterator would return the first integer multiple
of self._num_gpus * self.batch_size entries which exceeds 'size'.
Setting this flag to False will cause the iterator to return
exactly 'size' entries.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
or it just wraps up. In conjunction with ``last_batch_policy`` it tells
whether, when the iterator returns a last batch only partially filled with
data from the current epoch, the remaining samples are padding samples or
samples from the next epoch. If set to ``False``, the next
epoch will end sooner, as some of its data was already consumed but dropped. If set to
``True``, the next epoch will have the same length as the first one. For this to happen,
the option `pad_last_batch` in the reader needs to be set to True as well.
It is overwritten when the `reader_name` argument is provided
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = True -> last batch = ``[7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.PARTIAL, last_batch_padded = False -> last batch = ``[7]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True -> last batch = ``[7, 7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False -> last batch = ``[7, 1]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True -> last batch = ``[5, 6]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False -> last batch = ``[5, 6]``,
next iteration will return ``[2, 3]``
"""
def __init__(self,
pipelines,
size=-1,
reader_name=None,
auto_reset=False,
fill_last_batch=None,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True):
assert pipelines is not None, "Number of provided pipelines has to be at least 1"
if not isinstance(pipelines, list):
pipelines = [pipelines]
self._num_gpus = len(pipelines)
# frameworks expect their data iterators to have a batch_size field,
# so it is not possible to use _batch_size instead
self.batch_size = pipelines[0].max_batch_size
assert np.all(np.equal([pipe.max_batch_size for pipe in pipelines], self.batch_size)), \
"All pipelines should have the same batch size set"
self._size = int(size)
if not auto_reset or auto_reset is None or auto_reset == "no":
self._auto_reset = "no"
elif auto_reset or auto_reset == "yes":
self._auto_reset = "yes"
else:
raise ValueError(f"Unsupported value for `auto_reset` {auto_reset}")
self._prepare_first_batch = prepare_first_batch
if fill_last_batch is not None:
warnings.warn("Please do not use `fill_last_batch` and use `last_batch_policy` \
instead.", Warning, stacklevel=2)
if fill_last_batch:
self._last_batch_policy = LastBatchPolicy.FILL
else:
self._last_batch_policy = LastBatchPolicy.PARTIAL
else:
if type(last_batch_policy) is not LastBatchPolicy:
raise ValueError("Wrong type for `last_batch_policy`. "
f"Expected {LastBatchPolicy}, got {type(last_batch_policy)}")
self._last_batch_policy = last_batch_policy
self._last_batch_padded = last_batch_padded
assert self._size != 0, "Size cannot be 0"
assert self._size > 0 or (self._size < 0 and (len(pipelines) == 1 or reader_name)), \
"Negative size is supported only for a single pipeline"
assert not reader_name or (reader_name and self._size < 0), \
"When reader_name is provided, size should not be set"
assert not reader_name or (reader_name and not last_batch_padded), \
"When reader_name is provided, last_batch_padded should not be set"
if self._size < 0 and not reader_name:
self._last_batch_policy = LastBatchPolicy.FILL
self._last_batch_padded = False
if self.size > 0 and not reader_name:
_iterator_deprecation_warning()
self._pipes = pipelines
self._counter = 0
# Build all pipelines
for p in self._pipes:
with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
p.build()
self._reader_name = reader_name
self._extract_from_reader_and_validate()
self._ever_scheduled = False
self._ever_consumed = False
def _calculate_shard_sizes(self, shard_nums):
shards_beg = np.floor(shard_nums * self._size_no_pad / self._shards_num)
shards_end = np.floor((shard_nums + 1) * self._size_no_pad / self._shards_num)
shards_beg = shards_beg.astype(np.int64)
shards_end = shards_end.astype(np.int64)
return shards_end - shards_beg
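# Worked example (illustrative, not part of the original file): with
# _size_no_pad = 10 and _shards_num = 3, the shard boundaries are
# floor([0, 1, 2, 3] * 10 / 3) = [0, 3, 6, 10], so the computed shard sizes are
# [3, 3, 4] - uneven shards differ by at most one sample.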
def _extract_from_reader_and_validate(self):
if self._reader_name:
readers_meta = [p.reader_meta(self._reader_name) for p in self._pipes]
def err_msg_gen(err_msg):
return 'Reader Operator should have the same {} in all the pipelines.'.format(
err_msg
)
def check_equality_and_get(input_meta, name, err_msg):
assert np.all(np.equal([meta[name] for meta in input_meta], input_meta[0][name])), \
err_msg_gen(err_msg)
return input_meta[0][name]
def check_all_or_none_and_get(input_meta, name, err_msg):
assert np.all([meta[name] for meta in input_meta]) or \
not np.any([meta[name] for meta in input_meta]), \
err_msg_gen(err_msg)
return input_meta[0][name]
self._size_no_pad = check_equality_and_get(readers_meta,
"epoch_size", "size value")
self._shards_num = check_equality_and_get(readers_meta,
"number_of_shards",
"`num_shards` argument set")
self._last_batch_padded = check_all_or_none_and_get(readers_meta, "pad_last_batch",
"`pad_last_batch` argument set")
self._is_stick_to_shard = check_all_or_none_and_get(readers_meta, "stick_to_shard",
"`stick_to_shard` argument set")
self._shards_id = np.array([meta["shard_id"] for meta in readers_meta], dtype=np.int64)
if self._last_batch_policy == LastBatchPolicy.DROP:
# when DROP policy is used round down the shard size
self._size = self._size_no_pad // self._shards_num
elif self._last_batch_padded:
# if padding is enabled all shards are equal
self._size = readers_meta[0]["epoch_size_padded"] // self._shards_num
else:
# get the size as a multiply of the batch size that is bigger or equal
# than the biggest shard
self._size = math.ceil(math.ceil(self._size_no_pad / self._shards_num) /
self.batch_size) * self.batch_size
# count where we start inside each GPU shard in a given epoch;
# if shards are uneven this will differ from epoch to epoch
self._counter_per_gpu = np.zeros(self._shards_num, dtype=np.int64)
self._shard_sizes_per_gpu = self._calculate_shard_sizes(np.arange(0, self._shards_num))
# to avoid recalculation of shard sizes when the iterator moves across the shards,
# memorize the initial shard sizes and then use the changing self._shards_id to index them
self._shard_sizes_per_gpu_initial = self._shard_sizes_per_gpu.copy()
def _remove_padded(self):
"""
Checks whether any padded samples need to be removed and, if so, how many.
Calculates the number of padded samples in the batch for each pipeline
wrapped by the iterator. Returns whether there is any padded data that
needs to be dropped and, if so, how many samples in each GPU
"""
if_drop = False
left = -1
if self._last_batch_policy == LastBatchPolicy.PARTIAL:
# calculate each shard size for each id and check how many samples are left
# by subtracting the shard size from the iterator counter; then go through all GPUs
# and check how much data needs to be dropped
left = self.batch_size - \
(self._counter - self._shard_sizes_per_gpu_initial[self._shards_id])
if_drop = np.less(left, self.batch_size)
return if_drop, left
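# Worked example (illustrative, not part of the original file): with batch_size = 4,
# an initial shard size of 10 and the PARTIAL policy, after the third batch the
# counter reaches 12, so left = 4 - (12 - 10) = 2 and if_drop is True: only the
# first two samples of the last batch carry real data.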
def _get_outputs(self):
"""
Checks the iterator stop condition, gets DALI outputs and performs a reset in case of StopIteration
"""
# if the pipeline was never scheduled, do it here
if not self._ever_scheduled:
self._schedule_runs(False)
if self._size > 0 and self._counter >= self._size:
self._end_iteration()
outputs = []
try:
for p in self._pipes:
with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
outputs.append(p.share_outputs())
except StopIteration as e:
# in case ExternalSource returns StopIteration
if self._size < 0 and self._auto_reset == "yes":
self.reset()
raise e
self._check_batch_size(outputs)
return outputs
def _check_batch_size(self, outs):
if not isinstance(outs, Iterable):
outs = [outs]
if self._reader_name or self._size != -1:
for out in outs:
for o in out:
batch_len = len(o)
assert self.batch_size == batch_len, \
"Variable batch size is not supported by the iterator " + \
"when reader_name is provided or iterator size is set explicitly"
def _end_iteration(self):
if self._auto_reset == "yes":
self.reset()
raise StopIteration
def _schedule_runs(self, release_outputs=True):
"""
Schedule DALI runs
"""
self._ever_scheduled = True
for p in self._pipes:
with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
if release_outputs:
p.release_outputs()
p.schedule_run()
def _advance_and_check_drop_last(self, dry_run=False, end_iteration=True):
"""
Checks whether the current batch is not fully filled and whether it should be dropped.
It can be a dry run that doesn't change the iterator state and doesn't raise StopIteration
"""
# check, for the given initial count in any GPU and the current number of samples read,
# whether reading one more batch would overflow
counter = self._counter
should_end = False
if self._reader_name:
counter += self.batch_size
if self._last_batch_policy == LastBatchPolicy.DROP:
should_end = np.any(self._counter_per_gpu + counter > self._shard_sizes_per_gpu)
else:
counter += self._num_gpus * self.batch_size
if self._last_batch_policy == LastBatchPolicy.DROP:
should_end = counter > self._size
if not dry_run:
self._counter = counter
if should_end and end_iteration:
self._end_iteration()
return should_end
def reset(self):
"""
Resets the iterator after the full epoch.
DALI iterators do not support resetting before the end of the epoch
and will ignore such request.
"""
# in the case of the DROP policy the user who runs DALI, based on the iterator length,
# can assume there is no more data in the pipeline, while the last, incomplete batch
# still needs to be extracted from the pipeline and dropped before raising
# StopIteration indicating the pipeline is depleted. Here we first check if that
# is the case, and if so we run the pipeline and drop the last batch
if self._last_batch_policy == LastBatchPolicy.DROP:
should_end = self._advance_and_check_drop_last(dry_run=True, end_iteration=False)
already_ended = self._size > 0 and self._counter >= self._size
if should_end and not already_ended:
self._get_outputs()
self._schedule_runs()
self._advance_and_check_drop_last(end_iteration=False)
if self._counter >= self._size or self._size < 0:
if self._last_batch_policy == LastBatchPolicy.FILL and not self._last_batch_padded:
if self._reader_name:
# accurate way
# get the number of samples read in this epoch by each GPU
# self._counter had initial value of min(self._counter_per_gpu) so subtract
# this to get the actual value
self._counter -= min(self._counter_per_gpu)
self._counter_per_gpu = self._counter_per_gpu + self._counter
# check how much each GPU read ahead into the next shard; as shards have different
# sizes each epoch, a GPU may read ahead or not
self._counter_per_gpu = self._counter_per_gpu - self._shard_sizes_per_gpu
# to make sure that in the next epoch we read the whole shard we need
# to set start value to the smallest one
self._counter = min(self._counter_per_gpu)
else:
# legacy way
self._counter = self._counter % self._size
else:
self._counter = 0
# advance to the next shard
if self._reader_name:
if not self._is_stick_to_shard:
# move shards id for wrapped pipelines
self._shards_id = (self._shards_id + 1) % self._shards_num
# re-evaluate _size
if self._last_batch_policy == LastBatchPolicy.FILL and not self._last_batch_padded:
# move all shards ids GPU ahead
if not self._is_stick_to_shard:
self._shard_sizes_per_gpu = np.roll(self._shard_sizes_per_gpu, 1)
# check how many samples we need to reach from each shard in next epoch
# per each GPU taking into account already read
read_in_next_epoch = self._shard_sizes_per_gpu - self._counter_per_gpu
# get the maximum number of samples and round it up to full batch sizes
self._size = math.ceil(max(read_in_next_epoch) / self.batch_size) * \
self.batch_size
# in case some epoch is skipped because we have read ahead in this epoch so
# much that the next one is already done
if self._size == 0:
# it means that self._shard_sizes_per_gpu == self._counter_per_gpu,
# so we can jump to the next epoch and zero self._counter_per_gpu
self._counter_per_gpu = np.zeros(self._shards_num, dtype=np.int64)
# self._counter = min(self._counter_per_gpu), but just set 0
# to make it simpler
self._counter = 0
# roll once again
self._shard_sizes_per_gpu = np.roll(self._shard_sizes_per_gpu, 1)
# as self._counter_per_gpu is 0 we can just use
# read_in_next_epoch = self._shard_sizes_per_gpu
self._size = math.ceil(max(self._shard_sizes_per_gpu) / self.batch_size) * \
self.batch_size
for p in self._pipes:
p.reset()
if p.empty():
with p._check_api_type_scope(types.PipelineAPIType.ITERATOR):
p.schedule_run()
else:
logging.warning("DALI iterator does not support resetting while epoch is not finished. \
Ignoring...")
def next(self):
"""
Returns the next batch of data.
"""
self._ever_consumed = True
return self.__next__()
def __next__(self):
raise NotImplementedError
def __iter__(self):
# avoid redundant reset when someone would call `iter()` on a new iterator
# do not reset if no data was consumed from the iterator - to avoid unintended
# buffering in the pipeline and the FW iterator
if self._counter != 0 and self._ever_consumed:
self.reset()
return self
@property
def size(self):
return self._size
def __len__(self):
if self._reader_name:
if self._last_batch_policy != LastBatchPolicy.DROP:
return math.ceil(self.size / self.batch_size)
else:
return self.size // self.batch_size
else:
if self._last_batch_policy != LastBatchPolicy.DROP:
return math.ceil(self.size / (self._num_gpus * self.batch_size))
else:
return self.size // (self._num_gpus * self.batch_size)
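# Worked example (illustrative, not part of the original file): with `reader_name`
# set, a shard of 7 samples and batch_size = 2, len(iterator) is ceil(7 / 2) = 4
# for the FILL and PARTIAL policies and 7 // 2 = 3 for DROP.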
|
DALI-main
|
dali/python/nvidia/dali/plugin/base_iterator.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.version import LooseVersion
from nvidia.dali import backend as _b
from nvidia.dali.pipeline import Pipeline
from nvidia.dali.data_node import DataNode as _DataNode
from nvidia.dali import ops
from nvidia.dali import types as dali_types
from numba import types as numba_types
from numba import njit, cfunc, carray, cuda
import numpy as np
import numba as nb
_to_numpy = {
dali_types.UINT8: "uint8",
dali_types.UINT16: "uint16",
dali_types.UINT32: "uint32",
dali_types.UINT64: "uint64",
dali_types.INT8: "int8",
dali_types.INT16: "int16",
dali_types.INT32: "int32",
dali_types.INT64: "int64",
dali_types.FLOAT16: "float16",
dali_types.FLOAT: "float32",
dali_types.FLOAT64: "float64",
}
_to_numba = {
dali_types.UINT8: numba_types.uint8,
dali_types.UINT16: numba_types.uint16,
dali_types.UINT32: numba_types.uint32,
dali_types.UINT64: numba_types.uint64,
dali_types.INT8: numba_types.int8,
dali_types.INT16: numba_types.int16,
dali_types.INT32: numba_types.int32,
dali_types.INT64: numba_types.int64,
dali_types.FLOAT16: numba_types.float16,
dali_types.FLOAT: numba_types.float32,
dali_types.FLOAT64: numba_types.float64,
}
# Minimal version of Numba that is required for Numba GPU operator to work
minimal_numba_version = LooseVersion('0.55.2')
@nb.extending.intrinsic
def address_as_void_pointer(typingctx, src):
from numba.core import types, cgutils
sig = types.voidptr(src)
def codegen(cgctx, builder, sig, args):
return builder.inttoptr(args[0], cgutils.voidptr_t)
return sig, codegen
@njit
def _get_shape_view(shapes_ptr, ndims_ptr, num_dims, num_samples):
ndims = carray(address_as_void_pointer(ndims_ptr), num_dims, dtype=np.int32)
samples = carray(address_as_void_pointer(shapes_ptr), (num_dims, num_samples), dtype=np.int64)
ret = []
for sample, size in zip(samples, ndims):
d = []
for shape_ptr in sample:
d.append(carray(address_as_void_pointer(shape_ptr), size, dtype=np.int64))
ret.append(d)
return ret
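# Minimal usage sketch (not part of DALI) of the pointer-viewing pattern above:
# `address_as_void_pointer` turns an integer address into a void* and `carray`
# wraps it in a typed, no-copy array view; the caller owns the memory.
@njit
def _sum_int64_at_address(addr, n):
    view = carray(address_as_void_pointer(addr), n, dtype=np.int64)
    return view.sum()
# e.g. for arr = np.arange(4, dtype=np.int64):
# _sum_int64_at_address(arr.ctypes.data, arr.size) == 6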
class NumbaFunction(metaclass=ops._DaliOperatorMeta):
schema_name = 'NumbaFunction'
ops.register_cpu_op('NumbaFunction')
ops.register_gpu_op('NumbaFunction')
@property
def spec(self):
return self._spec
@property
def schema(self):
return self._schema
@property
def device(self):
return self._device
@property
def preserve(self):
return self._preserve
def _setup_fn_sig(self):
return numba_types.void(numba_types.uint64,
numba_types.uint64,
numba_types.int32,
numba_types.uint64,
numba_types.uint64,
numba_types.int32, numba_types.int32)
def _run_fn_sig(self, batch_processing=False):
sig_types = []
sig_types.append(numba_types.uint64)
sig_types.append(numba_types.uint64)
sig_types.append(numba_types.uint64)
sig_types.append(numba_types.int32)
sig_types.append(numba_types.uint64)
sig_types.append(numba_types.uint64)
sig_types.append(numba_types.uint64)
sig_types.append(numba_types.int32)
if batch_processing:
sig_types.append(numba_types.int32)
return numba_types.void(*sig_types)
def _get_carray_eval_lambda(self, dtype, ndim):
eval_string = "lambda ptr, shape: carray(ptr, ("
for i in range(ndim):
eval_string += "shape[{}]".format(i)
eval_string += ", " if i + 1 != ndim else "), "
eval_string += "dtype=np.{})".format(_to_numpy[dtype])
return njit(eval(eval_string))
def _get_carrays_eval_lambda(self, types, ndim):
ret = [self._get_carray_eval_lambda(dtype, ndim) for dtype, ndim in zip(types, ndim)]
ret += [njit(eval(("lambda x, y: None"))) for i in range(6 - len(types))]
return tuple(ret)
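    # For illustration (values assumed for the example): for dtype=dali_types.FLOAT
    # and ndim=2, _get_carray_eval_lambda builds the string
    #   "lambda ptr, shape: carray(ptr, (shape[0], shape[1]), dtype=np.float32)"
    # which is jitted into a helper that wraps a raw pointer in a 2D float32 view.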
def _get_run_fn_lambda(self, num_outs, num_ins):
eval_string = ("lambda run_fn, out0, out1, out2, out3, out4, out5, "
"in0, in1, in2, in3, in4, in5 : "
"run_fn(")
for i in range(num_outs):
eval_string += "out{}".format(i)
eval_string += ", " if i + 1 != num_outs else ", "
for i in range(num_ins):
eval_string += "in{}".format(i)
eval_string += ", " if i + 1 != num_ins else ")"
return njit(eval(eval_string))
def _get_setup_fn_cpu(self, setup_fn):
setup_fn_address = None
if setup_fn is not None:
setup_fn = njit(setup_fn)
@cfunc(self._setup_fn_sig(), nopython=True)
def setup_cfunc(out_shapes_ptr, out_ndims_ptr, num_outs,
in_shapes_ptr, in_ndims_ptr, num_ins,
num_samples):
out_shapes_np = _get_shape_view(out_shapes_ptr, out_ndims_ptr,
num_outs, num_samples)
                in_shapes_np = _get_shape_view(in_shapes_ptr, in_ndims_ptr,
                                               num_ins, num_samples)
setup_fn(out_shapes_np, in_shapes_np)
setup_fn_address = setup_cfunc.address
return setup_fn_address
def _get_run_fn_gpu(self, run_fn, types, dims):
nvvm_options = {
'fastmath': False,
'opt': 3
}
cuda_arguments = []
for dali_type, ndim in zip(types, dims):
cuda_arguments.append(numba_types.Array(_to_numba[dali_type], ndim, 'C'))
if LooseVersion(nb.__version__) < LooseVersion('0.57.0'):
cres = cuda.compiler.compile_cuda(run_fn, numba_types.void, cuda_arguments)
else:
pipeline = Pipeline.current()
device_id = pipeline.device_id
old_device = nb.cuda.api.get_current_device().id
cc = nb.cuda.api.select_device(device_id).compute_capability
nb.cuda.api.select_device(old_device)
cres = cuda.compiler.compile_cuda(run_fn, numba_types.void, cuda_arguments, cc=cc)
tgt_ctx = cres.target_context
code = run_fn.__code__
filename = code.co_filename
linenum = code.co_firstlineno
if LooseVersion(nb.__version__) < LooseVersion('0.57.0'):
nvvm_options['debug'] = False
nvvm_options['lineinfo'] = False
lib, _ = tgt_ctx.prepare_cuda_kernel(cres.library, cres.fndesc,
True, nvvm_options,
filename, linenum)
else:
lib, _ = tgt_ctx.prepare_cuda_kernel(cres.library, cres.fndesc,
False, True, nvvm_options,
filename, linenum)
handle = lib.get_cufunc().handle
return handle.value
def _get_run_fn_cpu(self, run_fn, out_types, in_types, outs_ndim, ins_ndim, batch_processing):
out0_lambda, out1_lambda, out2_lambda, out3_lambda, out4_lambda, out5_lambda = \
self._get_carrays_eval_lambda(out_types, outs_ndim)
in0_lambda, in1_lambda, in2_lambda, in3_lambda, in4_lambda, in5_lambda = \
self._get_carrays_eval_lambda(in_types, ins_ndim)
run_fn = njit(run_fn)
run_fn_lambda = self._get_run_fn_lambda(len(out_types), len(in_types))
if batch_processing:
@cfunc(self._run_fn_sig(batch_processing=True), nopython=True)
def run_cfunc(out_ptr, out_shapes_ptr, out_ndims_ptr, num_outs,
in_ptr, in_shapes_ptr, in_ndims_ptr, num_ins,
num_samples):
out0 = out1 = out2 = out3 = out4 = out5 = None
out_shapes_np = _get_shape_view(out_shapes_ptr,
out_ndims_ptr,
num_outs,
num_samples)
out_arr = carray(address_as_void_pointer(out_ptr),
(num_outs, num_samples),
dtype=np.int64)
if num_outs >= 1:
out0 = [out0_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(out_arr[0], out_shapes_np[0])]
if num_outs >= 2:
out1 = [out1_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(out_arr[1], out_shapes_np[1])]
if num_outs >= 3:
out2 = [out2_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(out_arr[2], out_shapes_np[2])]
if num_outs >= 4:
out3 = [out3_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(out_arr[3], out_shapes_np[3])]
if num_outs >= 5:
out4 = [out4_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(out_arr[4], out_shapes_np[4])]
if num_outs >= 6:
out5 = [out5_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(out_arr[5], out_shapes_np[5])]
in0 = in1 = in2 = in3 = in4 = in5 = None
in_shapes_np = _get_shape_view(in_shapes_ptr, in_ndims_ptr, num_ins, num_samples)
in_arr = carray(address_as_void_pointer(in_ptr),
(num_ins, num_samples),
dtype=np.int64)
if num_ins >= 1:
in0 = [in0_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(in_arr[0], in_shapes_np[0])]
if num_ins >= 2:
in1 = [in1_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(in_arr[1], in_shapes_np[1])]
if num_ins >= 3:
in2 = [in2_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(in_arr[2], in_shapes_np[2])]
if num_ins >= 4:
in3 = [in3_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(in_arr[3], in_shapes_np[3])]
if num_ins >= 5:
in4 = [in4_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(in_arr[4], in_shapes_np[4])]
if num_ins >= 6:
in5 = [in5_lambda(address_as_void_pointer(ptr), shape)
for ptr, shape in zip(in_arr[5], in_shapes_np[5])]
run_fn_lambda(run_fn,
out0, out1, out2, out3, out4, out5,
in0, in1, in2, in3, in4, in5)
else:
@cfunc(self._run_fn_sig(batch_processing=False), nopython=True)
def run_cfunc(out_ptr, out_shapes_ptr, out_ndims_ptr, num_outs,
in_ptr, in_shapes_ptr, in_ndims_ptr, num_ins):
out0 = out1 = out2 = out3 = out4 = out5 = None
out_shapes_np = _get_shape_view(out_shapes_ptr, out_ndims_ptr, num_outs, 1)
out_arr = carray(address_as_void_pointer(out_ptr), num_outs, dtype=np.int64)
if num_outs >= 1:
out0 = out0_lambda(address_as_void_pointer(out_arr[0]), out_shapes_np[0][0])
if num_outs >= 2:
out1 = out1_lambda(address_as_void_pointer(out_arr[1]), out_shapes_np[1][0])
if num_outs >= 3:
out2 = out2_lambda(address_as_void_pointer(out_arr[2]), out_shapes_np[2][0])
if num_outs >= 4:
out3 = out3_lambda(address_as_void_pointer(out_arr[3]), out_shapes_np[3][0])
if num_outs >= 5:
out4 = out4_lambda(address_as_void_pointer(out_arr[4]), out_shapes_np[4][0])
if num_outs >= 6:
out5 = out5_lambda(address_as_void_pointer(out_arr[5]), out_shapes_np[5][0])
in0 = in1 = in2 = in3 = in4 = in5 = None
in_shapes_np = _get_shape_view(in_shapes_ptr, in_ndims_ptr, num_ins, 1)
in_arr = carray(address_as_void_pointer(in_ptr), num_ins, dtype=np.int64)
if num_ins >= 1:
in0 = in0_lambda(address_as_void_pointer(in_arr[0]), in_shapes_np[0][0])
if num_ins >= 2:
in1 = in1_lambda(address_as_void_pointer(in_arr[1]), in_shapes_np[1][0])
if num_ins >= 3:
in2 = in2_lambda(address_as_void_pointer(in_arr[2]), in_shapes_np[2][0])
if num_ins >= 4:
in3 = in3_lambda(address_as_void_pointer(in_arr[3]), in_shapes_np[3][0])
if num_ins >= 5:
in4 = in4_lambda(address_as_void_pointer(in_arr[4]), in_shapes_np[4][0])
if num_ins >= 6:
in5 = in5_lambda(address_as_void_pointer(in_arr[5]), in_shapes_np[5][0])
run_fn_lambda(run_fn,
out0, out1, out2, out3, out4, out5,
in0, in1, in2, in3, in4, in5)
return run_cfunc.address
def __call__(self, *inputs, **kwargs):
pipeline = Pipeline.current()
if pipeline is None:
Pipeline._raise_no_current_pipeline("NumbaFunction")
        inputs = ops._preprocess_inputs(inputs, self._impl_name, self._device, None)
if (len(inputs) > self._schema.MaxNumInput() or
len(inputs) < self._schema.MinNumInput()):
raise ValueError(
("Operator {} expects from {} to " +
"{} inputs, but received {}.")
.format(type(self).__name__,
self._schema.MinNumInput(),
self._schema.MaxNumInput(),
len(inputs)))
for inp in inputs:
if not isinstance(inp, _DataNode):
raise TypeError(
("Expected inputs of type `DataNode`. Received input of type '{}'. " +
"Python Operators do not support Multiple Input Sets.")
.format(type(inp).__name__))
op_instance = ops._OperatorInstance(inputs, self, **kwargs)
op_instance.spec.AddArg("run_fn", self.run_fn)
if self.setup_fn is not None:
op_instance.spec.AddArg("setup_fn", self.setup_fn)
op_instance.spec.AddArg("out_types", self.out_types)
op_instance.spec.AddArg("in_types", self.in_types)
op_instance.spec.AddArg("outs_ndim", self.outs_ndim)
op_instance.spec.AddArg("ins_ndim", self.ins_ndim)
op_instance.spec.AddArg("device", self.device)
op_instance.spec.AddArg("batch_processing", self.batch_processing)
if self.device == 'gpu':
op_instance.spec.AddArg("blocks", self.blocks)
op_instance.spec.AddArg("threads_per_block", self.threads_per_block)
if self.num_outputs == 0:
t_name = self._impl_name + "_id_" + str(op_instance.id) + "_sink"
t = _DataNode(t_name, self._device, op_instance)
pipeline.add_sink(t)
return
outputs = []
for i in range(self.num_outputs):
t_name = op_instance._name
if self.num_outputs > 1:
t_name += "[{}]".format(i)
t = _DataNode(t_name, self._device, op_instance)
op_instance.spec.AddOutput(t.name, t.device)
op_instance.append_output(t)
pipeline.add_sink(t)
outputs.append(t)
return outputs[0] if len(outputs) == 1 else outputs
def __init__(self, run_fn,
out_types, in_types,
outs_ndim, ins_ndim,
setup_fn=None,
device='cpu',
batch_processing=False,
blocks=None,
threads_per_block=None,
**kwargs):
if device == 'gpu':
self._check_minimal_numba_version()
self._check_cuda_compatibility()
assert len(in_types) == len(ins_ndim), ("Number of input types "
"and input dimensions should match.")
assert len(out_types) == len(outs_ndim), ("Number of output types "
"and output dimensions should match.")
        if 'float16' not in dir(numba_types):
for t in [*in_types, *out_types]:
if t == dali_types.FLOAT16:
raise RuntimeError("Numba does not support float16 for "
"current Python version. "
"Python 3.7 or newer is required")
if device == 'gpu':
assert batch_processing is False, "Currently batch processing for GPU is not supported."
assert len(blocks) == 3, "`blocks` array should contain 3 numbers, while received: " \
f"{len(blocks)}"
for i, block_dim in enumerate(blocks):
assert block_dim > 0, "All dimensions should be positive. Value specified in " \
f"`blocks` at index {i} is nonpositive: {block_dim}"
assert len(threads_per_block) == 3, "`threads_per_block` array should contain 3 " \
f"numbers, while received: {len(threads_per_block)}"
for i, threads in enumerate(threads_per_block):
assert threads > 0, ("All dimensions should be positive. "
"Value specified in `threads_per_block` at index "
f"{i} is nonpositive: {threads}")
if not isinstance(outs_ndim, list):
outs_ndim = [outs_ndim]
if not isinstance(ins_ndim, list):
ins_ndim = [ins_ndim]
if not isinstance(out_types, list):
out_types = [out_types]
if not isinstance(in_types, list):
in_types = [in_types]
self._impl_name = "NumbaFuncImpl"
self._schema = _b.GetSchema(self._impl_name)
self._spec = _b.OpSpec(self._impl_name)
self._device = device
kwargs, self._call_args = ops._separate_kwargs(kwargs)
for key, value in kwargs.items():
self._spec.AddArg(key, value)
if device == 'gpu':
self.run_fn = self._get_run_fn_gpu(run_fn, out_types + in_types, outs_ndim + ins_ndim)
else:
self.run_fn = self._get_run_fn_cpu(run_fn, out_types, in_types, outs_ndim,
ins_ndim, batch_processing)
self.setup_fn = self._get_setup_fn_cpu(setup_fn)
self.out_types = out_types
self.in_types = in_types
self.outs_ndim = outs_ndim
self.ins_ndim = ins_ndim
self.num_outputs = len(out_types)
self.batch_processing = batch_processing
self._preserve = True
self.blocks = blocks
self.threads_per_block = threads_per_block
def _check_minimal_numba_version(self):
current_version = LooseVersion(nb.__version__)
if current_version < minimal_numba_version:
raise RuntimeError("Insufficient Numba version. Numba GPU operator "
f"requires Numba {minimal_numba_version} or higher. "
f"Detected version: {LooseVersion(nb.__version__)}.")
def _check_cuda_compatibility(self):
toolkit_version = cuda.runtime.get_version()
driver_version = cuda.driver.driver.get_version()
if toolkit_version > driver_version:
raise RuntimeError("Environment is not compatible with Numba GPU operator. "
f"Driver version is {driver_version} and CUDA Toolkit "
f"version is {toolkit_version}. "
"Driver cannot be older than the CUDA Toolkit")
ops._wrap_op(NumbaFunction, "fn.experimental", "nvidia.dali.plugin.numba")
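# Hedged usage sketch (not part of this module): how the wrapped operator is
# typically invoked. The external_source callback, shapes and dtypes below are
# assumptions made for the example; `numba_function` is the entry point
# created by ops._wrap_op above.
def _example_numba_pipeline():
    from nvidia.dali import pipeline_def, fn
    from nvidia.dali.plugin.numba.fn.experimental import numba_function

    def double_sample(out0, in0):
        out0[:] = 2 * in0[:]

    @pipeline_def(batch_size=4, num_threads=2, device_id=0)
    def pipe():
        data = fn.external_source(
            source=lambda si: np.full((16,), si.idx_in_epoch, dtype=np.uint8),
            batch=False)
        return numba_function(data, run_fn=double_sample,
                              out_types=[dali_types.UINT8], in_types=[dali_types.UINT8],
                              outs_ndim=[1], ins_ndim=[1], batch_processing=False)
    return pipe()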
|
DALI-main
|
dali/python/nvidia/dali/plugin/numba/experimental.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import experimental # noqa F401
|
DALI-main
|
dali/python/nvidia/dali/plugin/numba/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import jax
from distutils.version import LooseVersion
from .iterator import DALIGenericIterator
assert sys.version_info.major == 3 and sys.version_info.minor >= 8, \
"DALI JAX support requires Python 3.8 or above"
assert LooseVersion(jax.__version__) >= LooseVersion('0.4.11'), \
"DALI JAX support requires JAX 0.4.11 or above"
__all__ = ["DALIGenericIterator"]
|
DALI-main
|
dali/python/nvidia/dali/plugin/jax/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
import jax.dlpack
from nvidia.dali.backend import TensorGPU
def _to_jax_array(dali_tensor: TensorGPU) -> jax.Array:
"""Converts input DALI tensor to JAX array.
Args:
dali_tensor (TensorGPU): DALI GPU tensor to be converted to JAX array.
Note:
This function performs deep copy of the underlying data. That will change in
future releases.
Warning:
        As a private API, it may change without notice.
Returns:
jax.Array: JAX array with the same values and backing device as
input DALI tensor.
"""
jax_array = jax.dlpack.from_dlpack(dali_tensor._expose_dlpack_capsule())
# For now we need this copy to make sure that underlying memory is available.
# One solution is to implement full DLPack contract in DALI.
# TODO(awolant): Remove this copy.
return jax_array.copy()
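# Hedged usage sketch (assumes a GPU pipeline `pipe` that was built and run):
#     tensor_list, = pipe.run()
#     jax_batch = _to_jax_array(tensor_list.as_tensor())
# `jax_batch` is then a jax.Array backed by the same CUDA device as the DALI
# output (modulo the copy noted above).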
|
DALI-main
|
dali/python/nvidia/dali/plugin/jax/integration.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from nvidia.dali.plugin.base_iterator import LastBatchPolicy
from nvidia.dali.plugin.jax.iterator import DALIGenericIterator
from clu.data.dataset_iterator import ArraySpec, ElementSpec
import concurrent.futures
def get_spec_for_array(jax_array):
'''Utility to get ArraySpec for given JAX array.'''
return ArraySpec(
shape=jax_array.shape,
dtype=jax_array.dtype)
class DALIGenericPeekableIterator(DALIGenericIterator):
"""DALI iterator for JAX with peek functionality. Compatible with Google CLU PeekableIterator.
It supports peeking the next element in the iterator without advancing the iterator.
Note:
It is compatible with pipelines that return outputs with constant shape and type. It will
throw an exception if the shape or type of the output changes between iterations.
It provides ``element_spec`` property that returns a dictionary of ``ArraySpec`` objects
for each output category.
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
output_map : list of str
List of strings which maps consecutive outputs
of DALI pipelines to user specified name.
Outputs will be returned from iterator as dictionary
of those names.
Each name should be distinct
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one it is a sum)
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such case. It works with only one pipeline inside
the iterator.
Mutually exclusive with `reader_name` argument
reader_name : str, default = None
        Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to count properly the number of relevant and padded
samples that iterator needs to deal with. It automatically sets `last_batch_padded`
accordingly to match the reader's configuration.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
JAX iterator does not support LastBatchPolicy.PARTIAL
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
        or it just wraps up. In conjunction with ``last_batch_policy`` it tells
if the iterator returning last batch with data only partially filled with
data from the current epoch is dropping padding samples or samples from
the next epoch. If set to ``False`` next
epoch will end sooner as data from it was consumed but dropped. If set to
True next epoch would be the same length as the first one. For this to happen,
the option `pad_last_batch` in the reader needs to be set to True as well.
It is overwritten when `reader_name` argument is provided
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
    sharding : ``jax.sharding.Sharding`` compatible object that, if present, will be used to
build an output jax.Array for each category. If ``None``, the iterator returns
values compatible with pmapped JAX functions.
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True -> last batch = ``[7, 7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False -> last batch = ``[7, 1]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True -> last batch = ``[5, 6]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False -> last batch = ``[5, 6]``,
next iteration will return ``[2, 3]``
Note:
JAX iterator does not support LastBatchPolicy.PARTIAL.
"""
def __init__(
self,
pipelines,
output_map,
size=-1,
reader_name=None,
auto_reset=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True,
sharding=None):
super().__init__(
pipelines,
output_map,
size,
reader_name,
auto_reset,
last_batch_padded,
last_batch_policy,
prepare_first_batch,
sharding)
self._mutex = threading.Lock()
self._pool = None
self._peek = None
# Set element spec based on the first element
self._element_spec = None
peeked_output = self.peek()
self._element_spec = {
output_name: get_spec_for_array(peeked_output[output_name])
for output_name in self._output_categories
}
def _assert_output_shape_and_type(self, output):
if self._element_spec is None:
return output
for key in output:
if get_spec_for_array(output[key]) != self._element_spec[key]:
raise ValueError(
'The shape or type of the output changed between iterations. '
'This is not supported by JAX peekable iterator. '
'Please make sure that the shape and type of the output is constant. '
f'Expected: {self._element_spec[key]}, got: {get_spec_for_array(output[key])} '
f'for output: {key}')
return output
def _next_with_peek_impl(self):
"""Returns the next element from the iterator and advances the iterator.
Is extracted as a separate method to be used by ``peek`` and ``next`` methods
under the same lock.
"""
if self._peek is None:
return self._assert_output_shape_and_type(self._next_impl())
peek = self._peek
self._peek = None
return self._assert_output_shape_and_type(peek)
def __next__(self):
with self._mutex:
return self._next_with_peek_impl()
def peek(self):
"""Returns the next element from the iterator without advancing the iterator.
Returns:
dict : dictionary of jax.Array objects with the next element from the iterator.
"""
with self._mutex:
if self._peek is None:
self._peek = self._next_with_peek_impl()
return self._peek
def peek_async(self):
"""Returns future that will return the next element from
the iterator without advancing the iterator.
Note:
Calling ``peek_async`` without waiting for the future to complete is not
guaranteed to be executed before the next call to ``peek`` or ``next``.
If you want to make sure that the next call to ``peek`` or ``next`` will
return the same element as the future, you need to wait for the future to
complete.
Returns:
            concurrent.futures.Future: future that will return a dictionary of jax.Array
objects with the next element from the iterator.
"""
if self._pool is None:
            # Create the pool lazily, only the first time peek_async is called,
            # to avoid thread creation overhead
self._pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
future = self._pool.submit(self.peek)
return future
@property
def element_spec(self) -> ElementSpec:
"""Returns the element spec for the elements returned by the iterator.
ElementSpec contains ``ArraySpec`` for each output category which describes
shape and type of the output.
Returns:
ElementSpec: Element spec for the elements returned by the iterator.
"""
return self._element_spec
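# Hedged usage sketch (the pipeline and the reader name 'Reader' are assumptions):
# peek() does not advance the iterator, so the following next() yields the same batch.
def _example_peek(pipe):
    it = DALIGenericPeekableIterator([pipe], ["data"], reader_name="Reader")
    peeked = it.peek()
    batch = next(it)
    assert (peeked["data"] == batch["data"]).all()
    return it.element_spec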
|
DALI-main
|
dali/python/nvidia/dali/plugin/jax/clu.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
import jax.dlpack
import jax.numpy as jnp
from jax.sharding import NamedSharding, PositionalSharding
from nvidia.dali.plugin.base_iterator import _DaliBaseIterator
from nvidia.dali.plugin.base_iterator import LastBatchPolicy
from .integration import _to_jax_array
class DALIGenericIterator(_DaliBaseIterator):
"""
General DALI iterator for JAX. It can return any number of
outputs from the DALI pipeline in the form of JAX Arrays.
Parameters
----------
pipelines : list of nvidia.dali.Pipeline
List of pipelines to use
output_map : list of str
List of strings which maps consecutive outputs
of DALI pipelines to user specified name.
Outputs will be returned from iterator as dictionary
of those names.
Each name should be distinct
size : int, default = -1
Number of samples in the shard for the wrapped pipeline (if there is more than
one it is a sum)
Providing -1 means that the iterator will work until StopIteration is raised
from the inside of iter_setup(). The options `last_batch_policy` and
`last_batch_padded` don't work in such case. It works with only one pipeline inside
the iterator.
Mutually exclusive with `reader_name` argument
reader_name : str, default = None
        Name of the reader which will be queried for the shard size, number of shards and
all other properties necessary to count properly the number of relevant and padded
samples that iterator needs to deal with. It automatically sets `last_batch_padded`
accordingly to match the reader's configuration.
auto_reset : string or bool, optional, default = False
Whether the iterator resets itself for the next epoch or it requires reset() to be
called explicitly.
It can be one of the following values:
* ``"no"``, ``False`` or ``None`` - at the end of epoch StopIteration is raised
and reset() needs to be called
* ``"yes"`` or ``"True"``- at the end of epoch StopIteration is raised but reset()
is called internally automatically.
last_batch_policy: optional, default = LastBatchPolicy.FILL
What to do with the last batch when there are not enough samples in the epoch
to fully fill it. See :meth:`nvidia.dali.plugin.base_iterator.LastBatchPolicy`
JAX iterator does not support LastBatchPolicy.PARTIAL
last_batch_padded : bool, optional, default = False
Whether the last batch provided by DALI is padded with the last sample
        or it just wraps up. In conjunction with ``last_batch_policy`` it tells
if the iterator returning last batch with data only partially filled with
data from the current epoch is dropping padding samples or samples from
the next epoch. If set to ``False`` next
epoch will end sooner as data from it was consumed but dropped. If set to
True next epoch would be the same length as the first one. For this to happen,
the option `pad_last_batch` in the reader needs to be set to True as well.
It is overwritten when `reader_name` argument is provided
prepare_first_batch : bool, optional, default = True
Whether DALI should buffer the first batch right after the creation of the iterator,
so one batch is already prepared when the iterator is prompted for the data
    sharding : ``jax.sharding.Sharding`` compatible object that, if present, will be used to
build an output jax.Array for each category. If ``None``, the iterator returns
values compatible with pmapped JAX functions.
Example
-------
With the data set ``[1,2,3,4,5,6,7]`` and the batch size 2:
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = True -> last batch = ``[7, 7]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.FILL, last_batch_padded = False -> last batch = ``[7, 1]``,
next iteration will return ``[2, 3]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = True -> last batch = ``[5, 6]``,
next iteration will return ``[1, 2]``
last_batch_policy = LastBatchPolicy.DROP, last_batch_padded = False -> last batch = ``[5, 6]``,
next iteration will return ``[2, 3]``
Note:
JAX iterator does not support LastBatchPolicy.PARTIAL.
"""
def __init__(
self,
pipelines,
output_map,
size=-1,
reader_name=None,
auto_reset=False,
last_batch_padded=False,
last_batch_policy=LastBatchPolicy.FILL,
prepare_first_batch=True,
sharding=None):
# check the assert first as _DaliBaseIterator would run the prefetch
if len(set(output_map)) != len(output_map):
raise AssertionError("output_map names should be distinct")
self._output_categories = set(output_map)
self.output_map = output_map
if sharding is not None:
assert isinstance(sharding, (NamedSharding, PositionalSharding)), \
"`sharding` should be an instance of `NamedSharding` or `PositionalSharding`"
self._sharding = sharding
assert last_batch_policy != LastBatchPolicy.PARTIAL, \
"JAX iterator does not support partial last batch policy."
_DaliBaseIterator.__init__(
self,
pipelines,
size,
reader_name,
auto_reset,
None, # Default value for deprecated fill_last_batch argument
last_batch_padded,
last_batch_policy,
prepare_first_batch=prepare_first_batch)
self._first_batch = None
if self._prepare_first_batch:
try:
self._first_batch = DALIGenericIterator.__next__(self)
                # call to `next` sets _ever_consumed to True but if we are just calling it from
                # here we should set it to False again
self._ever_consumed = False
except StopIteration:
assert False, "It seems that there is no data in the pipeline. This may happen " \
"if `last_batch_policy` is set to PARTIAL and the requested batch size is " \
"greater than the shard size."
def _next_impl(self):
self._ever_consumed = True
if self._first_batch is not None:
batch = self._first_batch
self._first_batch = None
return batch
pipelines_outputs = self._get_outputs() # Can be accessed by outputs[device_id][output_id]
next_output = dict()
for category_id, category_name in enumerate(self.output_map):
category_outputs = self._gather_outputs_for_category(
pipelines_outputs, category_id)
if self._num_gpus == 1:
next_output[category_name] = category_outputs[0]
else:
self._assert_shards_shapes(category_outputs)
if self._sharding is not None:
next_output[category_name] = self._build_output_with_sharding(category_outputs)
else:
next_output[category_name] = self._build_output_with_device_put(
next_output, category_name, category_outputs)
self._schedule_runs()
self._advance_and_check_drop_last()
return next_output
def __next__(self):
return self._next_impl()
def _gather_outputs_for_category(self, pipelines_outputs, category_id):
category_outputs = []
for pipeline_id in range(self._num_gpus):
category_outputs.append(
_to_jax_array(pipelines_outputs[pipeline_id][category_id].as_tensor()))
return category_outputs
def _build_output_with_device_put(self, next_output, category_name, category_outputs):
"""Builds sharded jax.Array with `jax.device_put_sharded`. This output is compatible
with pmppped JAX functions.
"""
category_outputs_devices = tuple(map(
lambda jax_shard: jax_shard.device(),
category_outputs))
distinct_category_outputs_devices = set(category_outputs_devices)
if len(category_outputs_devices) != len(distinct_category_outputs_devices):
if len(distinct_category_outputs_devices) != 1:
raise AssertionError("JAX iterator requires shards to be placed on \
different devices or all on the same device.")
else:
# All shards are on one device (CPU or one GPU)
return jnp.stack(category_outputs)
else:
# Build sharded JAX array as output for current category (compatible with pmap)
return jax.device_put_sharded(category_outputs, category_outputs_devices)
def _build_output_with_sharding(self, category_outputs):
"""Builds sharded jax.Array with `jax.make_array_from_single_device_arrays`.
This output is compatible with automatic parallelization with JAX.
"""
shard_shape = category_outputs[0].shape
global_shape = (self._num_gpus * shard_shape[0], *shard_shape[1:])
return jax.make_array_from_single_device_arrays(
global_shape, self._sharding, category_outputs)
def _assert_shards_shapes(self, category_outputs):
for shard in category_outputs:
assert shard.shape == category_outputs[0].shape, \
"Shards shapes have to be the same."
|
DALI-main
|
dali/python/nvidia/dali/plugin/jax/iterator.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nvidia.dali._multiproc import shared_mem
from nvidia.dali._multiproc.messages import ShmMessageDesc
from nvidia.dali._utils.external_source_impl import \
assert_cpu_sample_data_type as _assert_cpu_sample_data_type, \
sample_to_numpy as _sample_to_numpy
import pickle
np = None
def _div_ceil(a, b):
"""Calculate ceil of a/b without decaying to float."""
return -(-a // b)
def _align_up(x, alignment):
""" Align x up to multiple of alignment"""
return _div_ceil(x, alignment) * alignment
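# e.g. _align_up(100, 128) == 128 and _align_up(256, 128) == 256; sample offsets
# below are padded this way so every sample starts at an aligned address.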
def import_numpy():
global np
if np is None:
try:
import numpy as np
except ImportError:
raise RuntimeError('Could not import numpy. Please make sure you have numpy '
'installed before you use parallel mode.')
_sample_error_msg = (
"Unsupported callback return type. Expected NumPy array, PyTorch or MXNet cpu tensors, "
"DALI TensorCPU, or list or tuple of them representing sample. Got `{}` instead.")
class BufShmChunk:
"""Simple wrapper around shared memory chunks. Adds mem_chunk_id used
to identify chunks in the communication between parent and worker process.
"""
def __init__(self, shm_chunk_id, capacity, shm_chunk: shared_mem.SharedMem):
self.shm_chunk_id = shm_chunk_id
self.capacity = capacity
self._shm_chunk = shm_chunk
def __getstate__(self):
state = self.__dict__.copy()
state['_shm_chunk'] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
@classmethod
def allocate(cls, shm_chunk_id, initial_chunk_size):
return cls(shm_chunk_id, initial_chunk_size,
shared_mem.SharedMem.allocate(initial_chunk_size))
def open_shm(self, handle):
# self._shm_chunk should be None only as a result of deserialization of the instance.
# In that case it is not valid to call other methods until shared memory chunk is restored
# with open_shm call
assert self._shm_chunk is None
try:
self._shm_chunk = shared_mem.SharedMem.open(handle, self.capacity)
except: # noqa: E722
if handle >= 0:
os.close(handle)
raise
def resize(self, size, trunc=False):
self._shm_chunk.resize(size, trunc)
self.capacity = size
def close(self):
self._shm_chunk.close()
def close_handle(self):
self._shm_chunk.close_handle()
@property
def handle(self):
return self._shm_chunk.handle
@property
def buf(self):
return self._shm_chunk.buf
class SampleMeta:
"""Metadata describing serialized sample in a memory buffer.
It is passed through memory, stored after sample it describes."""
def __init__(self, offset, shape, dtype, nbytes):
self.shape = shape
self.dtype = dtype
self.offset = offset
self.nbytes = nbytes
@classmethod
def from_np(cls, offset, np_array):
return cls(offset, np_array.shape, np_array.dtype, np_array.nbytes)
class SharedBatchMeta:
"""Describes offset within shared memory chunk and size of serialized list of
`SampleMeta` instances"""
def __init__(self, meta_offset, meta_size):
self.meta_offset = meta_offset
self.meta_size = meta_size
@classmethod
def from_writer(cls, writer):
return cls(writer.data_size, writer.meta_data_size)
def deserialize_sample(buffer: BufShmChunk, sample):
if isinstance(sample, SampleMeta):
offset = sample.offset
assert offset % sample.dtype.itemsize == 0, "Sample offset is misaligned."
buffer = buffer.buf[offset:offset + sample.nbytes]
return np.ndarray(sample.shape, dtype=sample.dtype, buffer=buffer)
if isinstance(sample, (tuple, list,)):
return type(sample)(deserialize_sample(buffer, part) for part in sample)
return sample
def deserialize_sample_meta(buffer: BufShmChunk, shared_batch_meta: SharedBatchMeta):
"""Helper to deserialize SampleMeta from memory based on SharedBatchMeta.
"""
sbm = shared_batch_meta
if sbm.meta_size == 0:
return []
pickled_meta = buffer.buf[sbm.meta_offset:sbm.meta_offset + sbm.meta_size]
samples_meta = pickle.loads(pickled_meta)
return samples_meta
def deserialize_batch(buffer: BufShmChunk, shared_batch_meta: SharedBatchMeta):
"""Deserialize samples from the smem buffer and SampleMeta descriptions.
Parameters
----------
buffer : BufShmChunk
Shared memory chunk with serialized sample data
shared_batch_meta : SharedBatchMeta
Metadata about serialized data in memory
Returns
-------
    list of numpy arrays or tuples/lists of numpy arrays
        Deserialized samples, mirroring the nesting of the serialized batch
"""
samples = deserialize_sample_meta(buffer, shared_batch_meta)
return [deserialize_sample(buffer, sample) for sample in samples]
def assert_valid_data_type(sample):
"""Check if the output of the callback is type that can be serialized"""
_apply_to_sample(lambda x: _assert_cpu_sample_data_type(x, _sample_error_msg), sample)
def _apply_to_sample(func, sample, *args, nest_with_sample=0):
"""Apply to a sample traversing the nesting of the data (tuple/list).
Parameters
----------
func : callable
Function to be applied to every sample data object
sample : sample object or any nesting of those in tuple/list
Representation of sample
nest_with_sample: int
Specify how many consecutive (additional) arguments have the same level of nesting
as the sample.
"""
if isinstance(sample, (tuple, list,)):
# Check that all the samples have common nesting
for i in range(nest_with_sample):
assert len(args[i]) == len(sample)
nest_group = sample, *args[0:nest_with_sample]
scalar_args = args[nest_with_sample:]
return type(sample)(_apply_to_sample(func, *part, *scalar_args)
for part in zip(*nest_group))
else:
# we unpacked all nesting levels, now is actual data:
return func(sample, *args)
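# For illustration (hypothetical values): _apply_to_sample(f, (a, [b, c])) returns
# (f(a), [f(b), f(c)]), preserving the nesting, while
# _apply_to_sample(f, (a, b), (x, y), nest_with_sample=1) pairs the nested
# arguments and returns (f(a, x), f(b, y)).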
class SharedBatchWriter:
"""SharedBatchWriter can serialize and write batch into given shared
memory chunk (``shm_chunk``).
"""
SAMPLE_ALIGNMENT = 128
BUFFER_ALIGNMENT = 4096
def __init__(self, shm_chunk: BufShmChunk, batch, min_trailing_offset=1024 * 1024):
import_numpy()
self.shm_chunk = shm_chunk
self.data_size = 0
self.meta_data_size = 0
self.total_size = 0
        # hint how much space should be left at the end of the shm chunk (after the
        # batch data) in case of a resize, to accommodate the meta data of the task
self.min_trailing_offset = min_trailing_offset
self._write_batch(batch)
def _prepare_samples_meta(self, samples):
"""Calculate metadata and total size of data to be serialized"""
data_size = 0
def make_meta(np_array):
nonlocal data_size
offset = _align_up(data_size, self.SAMPLE_ALIGNMENT)
data_size = offset + np_array.nbytes
return SampleMeta(offset, np_array.shape, np_array.dtype, np_array.nbytes)
meta = [_apply_to_sample(make_meta, sample) for sample in samples]
return meta, data_size
def _add_array_to_batch(self, np_array, meta, memview):
sample_size = meta.nbytes
offset = meta.offset
buffer = memview[offset:(offset + sample_size)]
shared_array = np.ndarray(
np_array.shape, dtype=np_array.dtype, buffer=buffer)
shared_array.ravel()[:] = np_array.ravel()[:]
def _write_batch(self, batch):
if not batch:
return
batch = [_apply_to_sample(lambda x: _sample_to_numpy(x, _sample_error_msg), sample)
for sample in batch]
meta, data_size = self._prepare_samples_meta(batch)
serialized_meta = pickle.dumps(meta)
self.meta_data_size = len(serialized_meta)
self.data_size = _align_up(data_size, self.SAMPLE_ALIGNMENT)
self.total_size = _align_up(self.data_size + self.meta_data_size, self.SAMPLE_ALIGNMENT)
if self.shm_chunk.capacity < self.total_size:
resize_shm_chunk(self.shm_chunk, self.total_size + self.min_trailing_offset)
memview = self.shm_chunk.buf
for sample, sample_meta in zip(batch, meta):
_apply_to_sample(self._add_array_to_batch, sample, sample_meta, memview,
nest_with_sample=1)
# copy meta data at the end of shared memory chunk
buffer = memview[self.data_size:(self.data_size + self.meta_data_size)]
buffer[:] = serialized_meta
def resize_shm_chunk(shm_chunk, needed_capacity):
new_capacity = max(needed_capacity, 2 * shm_chunk.capacity)
new_capacity = _align_up(new_capacity, SharedBatchWriter.BUFFER_ALIGNMENT)
shm_chunk.resize(new_capacity, trunc=True)
def read_shm_message(shm_chunk: BufShmChunk, shm_message):
if shm_message.shm_capacity != shm_chunk.capacity:
shm_chunk.resize(shm_message.shm_capacity, trunc=False)
buffer = shm_chunk.buf[shm_message.offset:shm_message.offset + shm_message.num_bytes]
return pickle.loads(buffer)
def write_shm_message(worker_id, shm_chunk: BufShmChunk, message, offset, resize=True):
"""
    Pickles the `message` instance, stores it in the provided `shm_chunk` at the given offset
    and returns a `ShmMessageDesc` instance describing the placement of the `message`.
    The returned instance can be put into a ShmQueue.
"""
serialized_message = pickle.dumps(message)
num_bytes = len(serialized_message)
if num_bytes > shm_chunk.capacity - offset:
if resize:
resize_shm_chunk(shm_chunk, offset + num_bytes)
else:
# This should not happen, resize is False only when writing task description into memory
# in the main process, and the description (ScheduledTask and its members) boils down
            # to a bounded number of integers.
raise RuntimeError("Could not put message into shared memory region,"
" not enough space in the buffer.")
buffer = shm_chunk.buf[offset:offset + num_bytes]
buffer[:] = serialized_message
return ShmMessageDesc(worker_id, shm_chunk.shm_chunk_id, shm_chunk.capacity, offset, num_bytes)
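# Hedged round-trip sketch (the ids and sizes are arbitrary example values):
# a message written with write_shm_message can be read back via the returned
# descriptor, as the dispatcher/receiver pair does across processes.
def _example_shm_roundtrip(payload):
    chunk = BufShmChunk.allocate(shm_chunk_id=0, initial_chunk_size=4096)
    desc = write_shm_message(worker_id=0, shm_chunk=chunk, message=payload,
                             offset=0, resize=True)
    assert read_shm_message(chunk, desc) == payload
    chunk.close()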
|
DALI-main
|
dali/python/nvidia/dali/_multiproc/shared_batch.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple
import threading
import traceback
import socket
from collections import deque
from multiprocessing import reduction
from nvidia.dali._utils.external_source_impl import SourceKind, _is_generator_function
from nvidia.dali._multiproc.shared_batch import SharedBatchWriter, SharedBatchMeta, BufShmChunk, \
assert_valid_data_type, read_shm_message, write_shm_message
from nvidia.dali._multiproc.messages import CompletedTask, WorkerArgs, ShmMessageDesc, ScheduledTask
from nvidia.dali._multiproc.shared_queue import Dispatcher
class _WorkerProcessingResult:
"""Internal worker message containing computed minibatch or error message sent from the main
thread to the dispatcher thread. The dispatcher thread serializes the batch or the error and
forwards the result as `CompletedTask` to the main process"""
def __init__(self, scheduled, shm_chunk, data_batch=None, exception=None,
traceback_str=None):
self.context_i = scheduled.context_i
self.scheduled_i = scheduled.scheduled_i
self.minibatch_i = scheduled.task.minibatch_i
self.shm_chunk = shm_chunk
self.data_batch = data_batch
self.exception = exception
self.traceback_str = traceback_str
@classmethod
def done(cls, scheduled, shm_chunk, data_batch):
return cls(scheduled, shm_chunk, data_batch)
@classmethod
def failed(cls, scheduled, shm_chunk, exception, traceback_str=None):
return cls(scheduled, shm_chunk, exception=exception, traceback_str=traceback_str)
def is_failed(self):
return self.exception is not None
class SharedBatchDispatcher(Dispatcher):
"""SharedBatchesDispatcher serializes batches, puts them into provided
shared memory chunks along with completed task description and puts information
about ready chunks into the `queue`. It processes tasks in a separate thread to
    overlap the serialization of minibatches with the computation of the next ones
    (e.g. when a callback waits extensively on IO) and to avoid having multiple worker
    processes wait on inter-process ShmQueue access"""
def __init__(self, worker_id, result_queue, recv_queues):
# close receiving queues if writing results fails to unblock
# the main thread that may be waiting on new tasks to process
def on_thread_exit():
for queue in recv_queues:
queue.close()
super().__init__(result_queue, on_thread_exit)
self.worker_id = worker_id
def _serialize_failed_task(self, processed_task: _WorkerProcessingResult):
"""
Puts CompletedTask instance (that describes an error encountered when producing batch)
in the provided shared memory chunk (`processed_task.shm_chunk`).
        Returns a `ShmMessageDesc` instance that describes the shared memory chunk and placement
        (offset=0, size) of the serialized CompletedTask instance in the chunk.
"""
shm_chunk = processed_task.shm_chunk
completed_task = CompletedTask.failed(self.worker_id, processed_task)
return write_shm_message(
self.worker_id, shm_chunk, completed_task, 0, resize=True)
def _serialize_done_task(self, processed_task: _WorkerProcessingResult):
"""
Puts produced batch in the provided shared memory chunk (`processed_task.shm_chunk`).
Layout of the data in the chunk:
[1. samples from the batch | 2. batch meta-data | 3. completed task].
1. Binary encoded samples from the batch (underlying data of numpy arrays),
aimed to be used as initialization buffers for arrays with no additional copy
or deserialization.
2. Pickled list of meta-data of each sample, such as the sample's binary data offset in
the chunk, a shape and a type of the array.
3. Pickled CompletedTask instance (that contains offset and size of the serialized list
from the second point).
Returns `ShmMessageDesc` instance, that describes shared memory chunk and placement
(offset, size) of the serialized CompletedTask instance in the chunk.
"""
shm_chunk = processed_task.shm_chunk
sbw = SharedBatchWriter(shm_chunk, processed_task.data_batch)
batch_meta = SharedBatchMeta.from_writer(sbw)
completed_task = CompletedTask.done(self.worker_id, processed_task, batch_meta)
return write_shm_message(
self.worker_id, shm_chunk, completed_task, sbw.total_size, resize=True)
def serialize_msgs(self, processed_tasks: List[_WorkerProcessingResult]):
shm_msgs = []
for processed_task in processed_tasks:
if processed_task.is_failed(): # one of the tasks failed
shm_msgs.append(self._serialize_failed_task(processed_task))
else:
shm_msgs.append(self._serialize_done_task(processed_task))
return shm_msgs
class SimpleQueueTaskReceiver:
"""
    Simple wrapper around shm queue; pops the first element from the queue
    and returns it
"""
def __init__(self, queue):
self.queue = queue
def get_task(self):
recv = self.queue.get()
if recv is None:
return
[task] = recv
return task
def get_recv_queues(self):
return [self.queue]
def close(self):
self.queue.close()
class MixedTaskReceiver:
"""
Mixes eager and idle worker threads each taking tasks from a different inter-process queue and
putting the tasks into a single (worker's internal) `task_queue`. Eager worker thread takes
tasks from the dedicated queue, i.e. tasks that can be processed only by the given worker
process. Idle worker thread takes tasks from the general queue, i.e. tasks that can be
processed by any worker process from the pool.
Eager worker reads tasks whenever any is available and moves them into the worker's internal
queue, whereas idle worker serves as a fallback that aims to read a single item only if
the internal queue is empty and the main thread does not process any task (is idle).
"""
class EagerReceiverWorker:
"""
Worker thread waiting for any tasks available in the inter-process queue
`dedicated_task_queue`. If anything is available, it takes all the items
and puts them into worker's internal task queue.
"""
def __init__(self, receiver_state, dedicated_task_queue):
self.receiver_state = receiver_state
self.dedicated_task_queue = dedicated_task_queue
self.thread = threading.Thread(target=self._receiver_loop, daemon=True)
self.thread.start()
def _receiver_loop(self):
try:
while True:
recv = self.dedicated_task_queue.get(num_samples=None)
if recv is None:
break
self.receiver_state.insert_task(recv)
finally:
self.receiver_state.insert_task(None)
def close(self):
self.dedicated_task_queue.close()
self.thread.join()
class IdleReceiverWorker:
"""
Worker thread that, when notified, takes a single task from the inter-process queue and
puts it into worker's internal task queue. It aims to take the task only if the main thread
reports it has no tasks to process - it rechecks that condition if it had to wait on empty
inter-process queue.
"""
def __init__(self, receiver_state, general_task_queue):
self.receiver_state = receiver_state
self.general_task_queue = general_task_queue
self.thread = threading.Thread(target=self._receiver_loop, daemon=True)
self.thread.start()
def _receiver_loop(self):
try:
while True:
if not self.receiver_state.wait_for_idle():
break
# Worker has no dedicated work to do (is idle), so take one task from
# general queue.
# If general queue is empty, the call will block and then
# recheck the condition
recv_pred = self.receiver_state.is_idle_and_uninterrupted
recv = self.general_task_queue.get(predicate=recv_pred)
if recv is None:
break
# if `is_idle_and_uninterrupted` returned False, recv is an empty list
if len(recv):
self.receiver_state.insert_task(recv)
finally:
self.receiver_state.insert_task(None)
def close(self):
self.receiver_state.interrupt_idle()
self.general_task_queue.close()
self.thread.join()
class MixedReceiverState:
def __init__(self):
self.lock = threading.Lock()
self.tasks_cv = threading.Condition(lock=self.lock)
self.idle_cv = threading.Condition(lock=self.lock)
self.is_idle = False
self.is_interrupted = False
self.task_queue = deque()
def _is_idle_state(self):
return self.is_idle and len(self.task_queue) == 0
def is_idle_and_uninterrupted(self):
with self.lock:
return not self.is_interrupted and self._is_idle_state()
def wait_for_idle(self):
with self.lock:
while not self.is_interrupted and not self._is_idle_state():
self.idle_cv.wait()
return not self.is_interrupted
def interrupt_idle(self):
with self.lock:
self.is_interrupted = True
self.idle_cv.notify()
def insert_task(self, recv):
with self.lock:
if recv is None:
self.task_queue.appendleft(recv)
else:
self.task_queue.extend(recv)
self.tasks_cv.notify()
def get_task(self):
with self.lock:
waited = False
while len(self.task_queue) == 0:
# there's only one consumer of task_queue,
# so no stealing of tasks between waits can happen
if not waited:
waited = True
self.is_idle = True
self.idle_cv.notify()
self.tasks_cv.wait()
self.is_idle = False
task = self.task_queue.popleft()
return task
def __init__(self, dedicated_task_queue, general_task_queue):
self.dedicated_task_queue = dedicated_task_queue
self.general_task_queue = general_task_queue
self.state = self.MixedReceiverState()
self.receivers = []
try:
self.receivers.append(self.EagerReceiverWorker(self.state, self.dedicated_task_queue))
self.receivers.append(self.IdleReceiverWorker(self.state, self.general_task_queue))
except: # noqa E722
self.close()
raise
def get_recv_queues(self):
return [self.general_task_queue, self.dedicated_task_queue]
def get_task(self):
return self.state.get_task()
def close(self):
for receiver in self.receivers:
receiver.close()
self.receivers.clear()
class IterableSource:
"""Wraps iterator/generator passed to External Source to enforce
ES `cycle` policy specified by the user.
    It is a counterpart of the _CycleIter/_CycleGenIter wrappers from the non-parallel mode.
    However, due to prefetching in the parallel mode, `cycle`="raise"
    keeps raising StopIteration in consecutive calls until a new epoch starts
    (which happens with the pipeline.reset() call)"""
def __init__(self, source_desc):
self.source_desc = source_desc
self._reset_iter(0)
def __call__(self, scheduled: ScheduledTask):
if self.raised_stop_iter:
# if iterator runs in "raise" mode and a new epoch started
# (i.e. source context was reset)
if self.source_desc.cycle == "raise" and self.epoch_start < scheduled.epoch_start:
self._reset_iter(scheduled.epoch_start)
else:
raise StopIteration
return self._get_next()
def _reset_iter(self, epoch_start):
self.iter = IterableSource.get_iter(self.source_desc)
self.raised_stop_iter = False
self.epoch_start = epoch_start
def _get_next(self):
try:
return next(self.iter)
except StopIteration:
self.raised_stop_iter = True
if self.source_desc.cycle != "quiet" and self.source_desc.cycle is not True:
raise
# in quiet mode immediately reset the source and return the first iteration
self.iter = IterableSource.get_iter(self.source_desc)
next_iter = next(self.iter)
# Set the `raised_stop_iter` flag to False after the __next__ call, so that,
# if it raises StopIteration immediately after the reset, the wrapper can consistently
# raise StopIteration from then on.
# The `epoch_start` is not updated - keeping track of it is not necessary
# in the quiet mode
self.raised_stop_iter = False
return next_iter
@staticmethod
def get_iter(source_desc):
source = source_desc.source
if _is_generator_function(source):
source = source()
return iter(source)
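# Behaviour sketch (illustrative, assuming a 2-element source): with cycle="quiet"
# a third call transparently restarts the source and yields the first element
# again, while with cycle="raise" the wrapper keeps raising StopIteration until
# the epoch is reset (scheduled.epoch_start moves forward), mirroring the
# _CycleIter/_CycleGenIter behaviour of the non-parallel mode.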
class CallableSource:
def __init__(self, source_desc):
self.callback = source_desc.source
def __call__(self, scheduled: ScheduledTask):
task = scheduled.task
if task.is_sample_mode():
data_batch = [self.callback(sample_info) for sample_info in task.sample_range]
else:
data_batch = self.callback(*task.batch_args)
return data_batch
def get_source_from_desc(source_descs):
if source_descs.kind == SourceKind.CALLABLE:
return CallableSource(source_descs)
elif source_descs.kind in (SourceKind.ITERABLE, SourceKind.GENERATOR_FUNC):
return IterableSource(source_descs)
raise RuntimeError("Unsupported source type")
class WorkerContext:
"""Initializes structures necessary for a worker process to receive,
compute and send back tasks."""
def __init__(self, worker_args: WorkerArgs):
self.worker_id = worker_args.worker_id
self.callbacks = self._init_callbacks(worker_args.source_descs,
worker_args.callback_pickler)
self.result_queue = worker_args.result_queue
self.general_task_queue = worker_args.general_task_queue
self.dedicated_task_queue = worker_args.dedicated_task_queue
shm_chunks = worker_args.shm_chunks
if worker_args.start_method != "fork":
setup_socket = worker_args.setup_socket
# NOTE when making any changes here, make sure to reflect them in the main process,
# so that it sends handles to objects in the same order they are set to objects here
self._recv_queue_handles(setup_socket)
for shm_chunk in shm_chunks:
shm_chunk.open_shm(reduction.recv_handle(setup_socket))
setup_socket.shutdown(socket.SHUT_RDWR)
setup_socket.close()
self.shm_chunks = {shm_chunk.shm_chunk_id: shm_chunk for shm_chunk in shm_chunks}
self.task_receiver = None
self.batch_dispatcher = None
try:
self.task_receiver = self._init_task_receiver()
self.batch_dispatcher = SharedBatchDispatcher(
worker_args.worker_id, worker_args.result_queue,
self.task_receiver.get_recv_queues())
except: # noqa E722
self.close()
raise
# let the main process know that the worker started and shared resources setup is done
worker_args.result_queue.put([ShmMessageDesc(self.worker_id, 0, 0, 0, 0)])
def _init_callbacks(self, source_descs, callback_pickler):
if callback_pickler is not None:
for source_desc in source_descs.values():
source_desc.source = callback_pickler.loads(source_desc.source)
return {
context_i: get_source_from_desc(source_desc)
for context_i, source_desc in source_descs.items()}
def _recv_queue_handles(self, setup_socket):
self.result_queue.open_shm(reduction.recv_handle(setup_socket))
if self.general_task_queue is not None:
self.general_task_queue.open_shm(reduction.recv_handle(setup_socket))
if self.dedicated_task_queue is not None:
self.dedicated_task_queue.open_shm(reduction.recv_handle(setup_socket))
def _init_task_receiver(self):
assert self.general_task_queue is not None or self.dedicated_task_queue is not None
if self.dedicated_task_queue is None or self.general_task_queue is None:
return SimpleQueueTaskReceiver(self.general_task_queue or self.dedicated_task_queue)
return MixedTaskReceiver(self.dedicated_task_queue, self.general_task_queue)
def get_task(self) -> Tuple[Optional[ScheduledTask], Optional[BufShmChunk]]:
"""
Returns scheduled task and shm_chunk where results should be placed
"""
scheduled_meta = self.task_receiver.get_task()
if scheduled_meta is None:
return None, None
shm_chunk = self.shm_chunks[scheduled_meta.shm_chunk_id]
scheduled = read_shm_message(shm_chunk, scheduled_meta)
return scheduled, shm_chunk
def get_callback(self, scheduled):
return self.callbacks[scheduled.context_i]
def dispatch(self, processed: _WorkerProcessingResult):
return self.batch_dispatcher.append(processed)
    def close(self):
        if self.task_receiver is not None:
            self.task_receiver.close()
        if self.batch_dispatcher is not None:
            self.batch_dispatcher.close()
def worker(worker_args: WorkerArgs):
"""Entry point of a worker process.
Computes minibatches in the main thread.
"""
worker_context = WorkerContext(worker_args)
try:
while True:
scheduled, shm_chunk = worker_context.get_task()
if scheduled is None:
break
callback = worker_context.get_callback(scheduled)
try:
data_batch = callback(scheduled)
for sample in data_batch:
assert_valid_data_type(sample)
except Exception as exception:
tb_str = traceback.format_exc()
processed = _WorkerProcessingResult.failed(scheduled, shm_chunk, exception, tb_str)
else:
processed = _WorkerProcessingResult.done(scheduled, shm_chunk, data_batch)
worker_context.dispatch(processed)
finally:
worker_context.close()
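# A standalone sketch of the try/except/else dispatch pattern used in the
# worker loop above: failures are captured together with their formatted
# traceback and reported back instead of crashing the worker process.
if __name__ == "__main__":
    import traceback as _traceback

    def _run_task(callback, arg):
        try:
            result = callback(arg)
        except Exception as exception:
            return ("failed", exception, _traceback.format_exc())
        else:
            return ("done", result, None)

    status, result, _ = _run_task(lambda x: x + 1, 1)
    assert (status, result) == ("done", 2)
    status, _, tb_str = _run_task(lambda x: x / 0, 1)
    assert status == "failed" and "ZeroDivisionError" in tb_str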
|
DALI-main
|
dali/python/nvidia/dali/_multiproc/worker.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nvidia.dali import backend as _b
class SharedMem:
"""SharedMem allows you to allocate and access shared memory.
Provides memory view of the allocated memory via buf property.
You can transfer access to the same shared memory chunk by sending related shared memory
handle (file descriptor on Unix) available as handle property. Use SharedMem.allocate
to allocate new chunk of shared memory and SharedMem.open if you received handle to already
existing memory chunk.
There is out of the box support for shared memory starting from Python3.8, though
the only way there to transfer the memory to other processes is via filename,
which might 'leak' if process was closed abruptly.
Parameters
----------
`handle` : int
Handle identifying related shared memory object. Pass None to allocate new memory chunk.
`size` : int
When handle=None it is the size of shared memory to allocate in bytes, otherwise it must be
the size of shared memory objects that provided handle represents.
"""
def __init__(self, handle, size):
if handle is None:
handle = -1
self.shm = _b.SharedMem(handle, size)
self.capacity = size
def __getattr__(self, key):
# lazily evaluate and cache 'buf' property, so that it is created only once
# and only when requested
if key == 'buf':
buf = self.shm.buf()
self.__dict__['buf'] = buf
return buf
raise AttributeError
@classmethod
def allocate(cls, size):
"""Creates new SharedMem instance representing freshly allocated
shared memory of ``size`` bytes.
Parameters
----------
`size` : int
Number of bytes to allocate.
"""
return cls(None, size)
@classmethod
def open(cls, handle, size):
"""Creates new SharedMem instance that points to already allocated shared
memory chunk accessible via provided shared memory ``handle``.
Parameters
----------
`handle`: int
            Handle pointing to an already existing shared memory chunk.
`size` : int
Size of the existing shared memory chunk.
"""
instance = cls(handle, size)
assert os.fstat(handle).st_size >= size
return instance
@property
def handle(self):
"""Shared memory handle (file descriptor on Unix), use it to transfer access
to the shared memory object to another process.
You can transfer it between processes via socket using multiprocessing.reduction.send_handle
"""
return self.shm.handle
def resize(self, size, trunc=False):
"""Resize already allocated shared memory chunk. If you want to resize the underlying
shared memory chunk pass trunc=True, if the memory chunk has already been resized
via another SharedMem instance (possibly in another process), pass new size and
trunc=False to simply adjust mmaping of the memory into the current process address space.
"""
if 'buf' in self.__dict__:
del self.__dict__['buf']
self.shm.resize(size, trunc)
self.capacity = size
def close(self):
"""Removes maping of the memory into process address space and closes related handle.
If all processes sharing given chunk close it, it will be automatically released by the OS.
You don't have to call this method, as corresponding clean up is performed when instance
gets garbage collected but you can call it as soon as you no longer need it for more
effective resources handling.
"""
self.buf = None
self.shm.close()
def close_handle(self):
"""Closes OS handle for underlying shared memory. From now on, the process cannot resize the
underlying memory with this handle but still can adjust the mapping if the underlying
shared memory is resized, for instance, by another process.
This means that call to resize with ``trunc``= True will be illegal.
"""
self.shm.close_handle()
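# A minimal usage sketch of SharedMem above (assumes a working DALI
# installation): the second mapping is created from a duplicated descriptor,
# the way another process would do after receiving the handle over a socket.
if __name__ == "__main__":
    chunk = SharedMem.allocate(4096)
    chunk.buf[:5] = b"hello"
    # duplicate the fd so that each instance closes its own handle
    alias = SharedMem.open(os.dup(chunk.handle), 4096)
    assert bytes(alias.buf[:5]) == b"hello"
    alias.close()
    chunk.close()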
|
DALI-main
|
dali/python/nvidia/dali/_multiproc/shared_mem.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import os
import threading
from nvidia.dali._multiproc import shared_mem
from nvidia.dali._multiproc.messages import Structure, ShmMessageDesc
from nvidia.dali._multiproc.shared_batch import _align_up as align_up
class QueueMeta(Structure):
_fields = ("capacity", "i"), ("size", "i"), ("begining", "i"), ("is_closed", "i")
class ShmQueue:
"""
    Simple fixed-capacity shared memory queue of fixed-size messages.
    Writing to a full queue fails; an attempt to get from an empty queue blocks until data is
    available or the queue is closed.
"""
MSG_CLASS = ShmMessageDesc
ALIGN_UP_MSG = 4
ALIGN_UP_BUFFER = 4096
def __init__(self, mp, capacity):
self.lock = mp.Lock()
self.cv_not_empty = mp.Condition(self.lock)
self.capacity = capacity
self.meta = QueueMeta(capacity, 0, 0, 0)
self.meta_size = align_up(self.meta.get_size(), self.ALIGN_UP_MSG)
dummy_msg = self.MSG_CLASS()
self.msg_size = align_up(dummy_msg.get_size(), self.ALIGN_UP_MSG)
self.shm_capacity = align_up(self.meta_size + capacity * self.msg_size,
self.ALIGN_UP_BUFFER)
self.shm = shared_mem.SharedMem.allocate(self.shm_capacity)
self.is_closed = False
self._init_offsets()
self._write_meta()
def __getstate__(self):
state = self.__dict__.copy()
state['msgs_offsets'] = None
state['shm'] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._init_offsets()
def _init_offsets(self):
self.msgs_offsets = [i * self.msg_size + self.meta_size for i in range(self.capacity)]
def _read_meta(self):
self.meta.unpack_from(self.shm.buf, 0)
def _write_meta(self):
self.meta.pack_into(self.shm.buf, 0)
def _read_msg(self, i):
offset = self.msgs_offsets[i]
msg = self.MSG_CLASS()
msg.unpack_from(self.shm.buf, offset)
return msg
def _write_msg(self, i, msg):
offset = self.msgs_offsets[i]
msg.pack_into(self.shm.buf, offset)
def _recv_samples(self, num_samples):
num_take = self.meta.size
if num_samples is not None and num_samples < num_take:
num_take = num_samples
recv = [self._read_msg((self.meta.begining + i) % self.meta.capacity)
for i in range(num_take)]
self.meta.size -= num_take
self.meta.begining = (self.meta.begining + num_take) % self.meta.capacity
self._write_meta()
return recv
def _wait_for_samples(self):
waited = False
self._read_meta()
while not self.meta.size > 0 and not self.meta.is_closed:
self.cv_not_empty.wait()
waited = True
self._read_meta()
return waited
def open_shm(self, handle, close_handle=True):
try:
shm = shared_mem.SharedMem.open(handle, self.shm_capacity)
self.shm = shm
if close_handle:
shm.close_handle()
except: # noqa: E722
if close_handle:
os.close(handle)
raise
def close_handle(self):
self.shm.close_handle()
def close(self):
if self.is_closed:
return
with self.lock:
self._read_meta()
self.is_closed = True
if not self.meta.is_closed:
self.meta.is_closed = 1
self._write_meta()
            # Notify only one waiting worker about closing the queue; the woken-up worker
            # will notify the next one. Avoid notify_all at this point, due to a possible
            # deadlock if one of the notified workers exited abruptly when waiting
            # on cv_not_empty without properly releasing the underlying semaphore.
self.cv_not_empty.notify()
def put(self, msgs: List[MSG_CLASS]) -> Optional[int]:
assert len(msgs), "Cannot write an empty list of messages"
if self.is_closed:
return
with self.lock:
self._read_meta()
if self.meta.size + len(msgs) > self.meta.capacity:
raise RuntimeError("The queue is full")
if self.meta.is_closed:
self.is_closed = True
return
msgs_len = len(msgs)
next_slot = (self.meta.begining + self.meta.size) % self.meta.capacity
for msg in msgs:
self._write_msg(next_slot, msg)
next_slot = (next_slot + 1) % self.meta.capacity
self.meta.size += msgs_len
self._write_meta()
self.cv_not_empty.notify()
return msgs_len
def get(self, num_samples=1, predicate=None) -> Optional[List[MSG_CLASS]]:
"""
        Parameters
        ----------
        num_samples : optional positive integer
            Maximal number of messages to take from the queue; if set to None, all available
            messages will be taken. The call blocks until any messages are available.
            It may return fewer than `num_samples` messages, but an empty list is returned only
            if `predicate` was specified and it evaluated to False after waiting on an empty
            queue. The call returns None iff the queue was closed.
        predicate : a parameterless callable
            Used for double-checking whether the items should really be taken after waiting
            on an empty queue.
"""
if self.is_closed:
return
with self.cv_not_empty: # equivalent to `with self.lock`
waited = self._wait_for_samples()
if self.meta.is_closed:
self.is_closed = True
self.cv_not_empty.notify()
return
if waited and predicate is not None and not predicate():
recv = []
else:
recv = self._recv_samples(num_samples)
if self.meta.size > 0:
self.cv_not_empty.notify()
return recv
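# A standalone sketch of the circular-buffer index arithmetic used by
# _recv_samples and put above: the `begining` and `size` fields describe a
# contiguous, possibly wrapped, run of occupied slots, and indices wrap
# modulo capacity.
if __name__ == "__main__":
    capacity, beginning, size = 4, 3, 2  # two messages, at slots 3 and 0
    occupied = [(beginning + i) % capacity for i in range(size)]
    assert occupied == [3, 0]
    next_free = (beginning + size) % capacity  # the slot put() writes next
    assert next_free == 1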
class Dispatcher:
"""Wrapper around the queue that enables writing to the queue in a separate thread, just in
case a writing process would have to wait too long for a lock on the queue when multiple
readers pop the items one by one."""
def __init__(self, target_queue, on_thread_exit=None):
self.pending_cv = threading.Condition()
self.pending = []
self.target_queue = target_queue
self.on_thread_exit = on_thread_exit
self.thread = threading.Thread(target=self._dispatch_loop, daemon=True)
self.thread.start()
def _dispatch_loop(self):
try:
while True:
with self.pending_cv:
while len(self.pending) == 0:
self.pending_cv.wait()
msgs = list(self.pending)
self.pending.clear()
if any(msg is None for msg in msgs):
break
msgs = self.serialize_msgs(msgs)
if self.target_queue.put(msgs) is None:
break
finally:
if self.on_thread_exit is not None:
self.on_thread_exit()
def close(self):
self.target_queue.close()
self.stop_thread()
def stop_thread(self):
if self.thread is not None:
self.append(None)
self.thread.join()
self.thread = None
def extend(self, msgs):
with self.pending_cv:
self.pending.extend(msgs)
self.pending_cv.notify()
def append(self, msg):
with self.pending_cv:
self.pending.append(msg)
self.pending_cv.notify()
def serialize_msgs(self, msgs):
raise NotImplementedError
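# Usage sketch of the Dispatcher above with a hypothetical in-memory target
# queue standing in for ShmQueue (its put returns the number of accepted
# messages, or None once closed) and identity serialization.
if __name__ == "__main__":
    import queue as _queue

    class _ToyTargetQueue:
        def __init__(self):
            self.q = _queue.Queue()
            self.closed = False

        def put(self, msgs):
            if self.closed:
                return None
            for msg in msgs:
                self.q.put(msg)
            return len(msgs)

        def close(self):
            self.closed = True

    class _ToyDispatcher(Dispatcher):
        def serialize_msgs(self, msgs):
            return msgs  # identity serialization for the demo

    target = _ToyTargetQueue()
    dispatcher = _ToyDispatcher(target)
    dispatcher.extend([1, 2, 3])
    assert [target.q.get(timeout=5) for _ in range(3)] == [1, 2, 3]
    dispatcher.close()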
|
DALI-main
|
dali/python/nvidia/dali/_multiproc/shared_queue.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/python/nvidia/dali/_multiproc/__init__.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from nvidia.dali.types import SampleInfo
from nvidia.dali._multiproc.struct_message import Structure
class ShmMessageDesc(Structure):
"""
    Type of a C-struct-like message exchanged via the shared memory queue (`ShmQueue`).
    It describes the placement (shared memory chunk, offset, etc.) of the actual data to be read
    by the receiver of the `ShmMessageDesc` instance.
    Parameters
    ----------
    `worker_id` : int
        Integer identifying the process that put the message: a number from the
        [0, num_workers) range for workers, or -1 in case of the main process.
    `shm_chunk_id` : int
        Integer identifying the shm chunk that contains the pickled data to be read by the
        receiver.
    `shm_capacity` : unsigned long long int
        Size of the `shm_chunk_id` chunk; the receiver should resize the mapping if the chunk
        was resized by the writer.
    `offset` : unsigned long long int
        Offset in the shm chunk where the serialized message starts.
    `num_bytes` : unsigned long long int
        Size in bytes of the serialized message.
"""
_fields = (("worker_id", "i"),
("shm_chunk_id", "i"),
("shm_capacity", "Q"),
("offset", "Q"), ("num_bytes", "Q"))
class WorkerArgs:
"""
    Pack of parameters passed to the worker process on initialization.
    Parameters
    ----------
    `worker_id` : Ordinal of the worker in the workers pool.
    `start_method` : Python's multiprocessing start method - `spawn` or `fork`.
    `source_descs` : Dictionary with External Source's SourceDescription instances as values.
        Keys are ordinals corresponding to the order in which callbacks were passed to the pool.
        If `callback_pickler` is not None, the actual callback in SourceDescription is replaced
        with the result of its serialization.
    `shm_chunks` : List of BufShmChunk instances that describe all the shared memory chunks
        available to the worker (they are identified by ids unique inside the pool).
    `general_task_queue` : Optional[ShmQueue]
        Queue with tasks for sources without a dedicated worker,
        or None if all sources have dedicated workers.
    `dedicated_task_queue` : Optional[ShmQueue]
        Queue with tasks for sources that are run solely in the given worker.
        If `dedicated_task_queue` is None, `general_task_queue` must be provided.
    `result_queue` : ShmQueue
        Queue to report any task done, no matter if dedicated or general.
    `setup_socket` : Optional[socket]
        Python wrapper around a Unix socket used to pass file descriptors identifying
        shared memory chunks to the child process. None if `start_method='fork'`.
    `callback_pickler`
        Optional custom pickler that was applied to serialize callbacks in `source_descs`."""
def __init__(self, *, worker_id, start_method, source_descs, shm_chunks, general_task_queue,
dedicated_task_queue, result_queue, setup_socket, callback_pickler):
self.worker_id = worker_id
self.start_method = start_method
self.source_descs = source_descs
self.shm_chunks = shm_chunks
self.general_task_queue = general_task_queue
self.dedicated_task_queue = dedicated_task_queue
self.result_queue = result_queue
self.setup_socket = setup_socket
self.callback_pickler = callback_pickler
class SampleRange:
"""
    Describes a batch or sub-batch of work in sample mode that consists of SampleInfo
    instances with consecutive indices. It denotes a range of samples within a given `iteration`
    of a given `epoch_idx`, optionally specifying a slice/sub-range of the sample range.
    It does not support spanning over multiple batches. Used to avoid a linear dependency of
    the task description size on the batch size.
"""
def __init__(self, sample_start, sample_end, iteration, epoch_idx, *,
slice_start=0, slice_end=None):
self.sample_start = sample_start # idx in epoch of first sample in batch
self.sample_end = sample_end # idx in epoch of one past last sample in batch
self.iteration = iteration # index of a batch within epoch
self.epoch_idx = epoch_idx
if slice_end is None:
slice_end = sample_end - sample_start
assert slice_start >= 0 and slice_start <= sample_end - sample_start
assert slice_end >= slice_start and slice_end <= sample_end - sample_start
# idx of first sample in slice (in a batch not an epoch)
self.slice_start = slice_start
# idx of one past last sample in slice (in a batch not an epoch)
self.slice_end = slice_end
def _get_index(self, idx, bound):
if idx is None:
return bound
if idx < 0:
return self.slice_end + idx
return self.slice_start + idx
def _get_slice(self, range_slice: slice):
if range_slice.step is not None and range_slice.step != 1:
raise ValueError("SampleRange only supports slicing with step 1")
slice_start = self._get_index(range_slice.start, self.slice_start)
slice_end = self._get_index(range_slice.stop, self.slice_end)
slice_start = min(slice_start, self.slice_end)
slice_end = max(min(slice_end, self.slice_end), slice_start)
return SampleRange(
self.sample_start, self.sample_end,
self.iteration, self.epoch_idx,
slice_start=slice_start,
slice_end=slice_end)
def __getitem__(self, idx):
if isinstance(idx, slice):
return self._get_slice(idx)
if idx < 0:
idx_in_batch = self.slice_end + idx
else:
idx_in_batch = self.slice_start + idx
if idx_in_batch < self.slice_start or idx_in_batch >= self.slice_end:
raise IndexError("Index {} out of range for slice of length {}".format(idx, len(self)))
return SampleInfo(
self.sample_start + idx_in_batch,
idx_in_batch,
self.iteration,
self.epoch_idx)
def __len__(self):
return self.slice_end - self.slice_start
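# Usage sketch of SampleRange above: a 5-sample batch starting at index 100
# of the epoch, sliced into two sub-ranges the way a batch is split into
# minibatches for different workers.
if __name__ == "__main__":
    sample_range = SampleRange(100, 105, iteration=7, epoch_idx=0)
    first, rest = sample_range[:2], sample_range[2:]
    assert len(first) == 2 and len(rest) == 3
    info = rest[0]  # SampleInfo of the third sample in the batch
    assert (info.idx_in_epoch, info.idx_in_batch) == (102, 2)
    assert (info.iteration, info.epoch_idx) == (7, 0)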
class TaskArgs:
@classmethod
def make_sample(cls, sample_range):
if len(sample_range) <= 0:
raise RuntimeError("Cannot schedule empty batch")
return cls(0, sample_range=sample_range)
@classmethod
def make_batch(cls, batch_args):
return cls(0, batch_args=batch_args)
def __init__(self, minibatch_i, sample_range: Optional[SampleRange] = None, batch_args=None):
self.minibatch_i = minibatch_i
self.sample_range = sample_range
self.batch_args = batch_args
assert (self.sample_range is None) != (self.batch_args is None)
def is_sample_mode(self):
return self.sample_range is not None
class ScheduledTask:
"""Message sent from the pool to a worker to schedule tasks for the worker
Parameters
----------
`context_i` : int
Index identifying the callback in the order of parallel callbacks passed to pool.
`scheduled_i` : int
Ordinal of the batch that tasks list corresponds to.
`epoch_start` : int
        The value is increased every time the corresponding context is reset;
        this way, the worker can know if a new epoch started and if it can restart an
        iterator that raised StopIteration but is set to cycle="raise".
    `task` : TaskArgs
        Describes the minibatch that should be computed by the worker. If the given source
        is run in batch mode, this simply wraps the parameters that the external source would
        pass to the source in non-parallel mode. In sample mode, it is (part of) the list
        of nvidia.dali.types.SampleInfo produced by the external source.
"""
def __init__(self, context_i, scheduled_i, epoch_start, task: TaskArgs):
self.context_i = context_i
self.scheduled_i = scheduled_i
self.epoch_start = epoch_start
self.task = task
class CompletedTask:
"""Message sent from a worker to the pool to notify the pool about completed tasks
along with meta data needed to fetch and deserialize results stored in the shared memory
Parameters
----------
`worker_id` : int
Id of the worker that completed the task.
`context_i` : int
Index identifying the callback in the order of parallel callbacks passed to pool.
`scheduled_i` : int
Ordinal of the batch that tasks corresponds to.
`minibatch_i` : int
Computation of batch might be split into number of minibatches, this is the number
that identifies which consecutive part of the batch it is.
`batch_meta` : nvidia.dali._multiproc.shared_batch.SharedBatchMeta
Serialized result of the task.
`exception`
Exception if the task failed.
"""
def __init__(
self, worker_id, context_i, scheduled_i, minibatch_i, batch_meta=None,
exception=None, traceback_str=None):
self.worker_id = worker_id
self.context_i = context_i
self.scheduled_i = scheduled_i
self.minibatch_i = minibatch_i
self.batch_meta = batch_meta
self.exception = exception
self.traceback_str = traceback_str
@classmethod
def done(cls, worker_id, processed, batch_meta):
return cls(worker_id, processed.context_i, processed.scheduled_i, processed.minibatch_i,
batch_meta=batch_meta)
@classmethod
def failed(cls, worker_id, processed):
return cls(worker_id, processed.context_i, processed.scheduled_i, processed.minibatch_i,
exception=processed.exception, traceback_str=processed.traceback_str)
def is_failed(self):
return self.exception is not None
|
DALI-main
|
dali/python/nvidia/dali/_multiproc/messages.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Any, Optional
import os
import socket
import threading
import warnings
import multiprocessing
import copy
from collections import deque
from nvidia.dali import backend as _b
from nvidia.dali import pickling
from nvidia.dali._utils.external_source_impl import SourceDescription, SourceKind
from nvidia.dali._multiproc.worker import worker
from nvidia.dali._multiproc.messages import ScheduledTask, TaskArgs, WorkerArgs
from nvidia.dali._multiproc.shared_batch import deserialize_batch, import_numpy, read_shm_message, \
BufShmChunk, SharedBatchWriter, write_shm_message, _align_up as align_up
from nvidia.dali._multiproc.shared_queue import ShmQueue
"""
A pipeline with parallel external sources creates a `WorkerPool` to parallelize the sources'
computation.
Each external source in the pipeline has its own `ShmChunkManager` with a view on shm chunks
dedicated to data computed by the given source. Those chunks are also used to pass minibatch (task)
descriptions to the workers. All the chunks for all the external sources in the pipeline
are created and stored in a common list shared by `ShmChunkManager` instances (the index in the
list makes a unique id for a chunk, used in communication between the workers and the pool), but
a single chunk is always allocated and used by a single `ShmChunkManager` instance.
`CallbackContext` combines the source (callback, iterator or generator function)
with its `ShmChunkManager` instance and contains an optional dedicated_worker_id if the source is
stateful and needs to be run in a single dedicated worker. The context keeps track of scheduled
tasks and partial results received from the workers for the given source.
`ProcPool` manages the actual worker processes and communication between them and the main
process; it is responsible for starting the workers and additional setup steps (such as passing
shm chunks through sockets if the `spawn` start method is used).
A `ProcPool` instance uses `ShmQueue` to communicate and synchronize with the workers: actual
tasks and data are serialized and put in shm chunks from ShmChunkManagers, whereas messages in
ShmQueue contain only simple fixed-size metadata, such as the id of the shm chunk to be read,
the current capacity of the shm chunk (which may increase if the worker couldn't fit the data)
and the offset of the data.
If the pipeline contains any stateless source that can be run in parallel, all the workers will
share a common ShmQueue instance with tasks related to any such source.
If some source gets a dedicated worker assigned, the worker will receive a dedicated queue not
shared with other workers and will receive all the dedicated tasks there.
Thus, a worker can have up to two queues with tasks.
Additionally, there is a single result queue shared by all the workers that is used to notify the
main process about completed tasks being ready for consumption by the main process.
"""
class ShmChunkManager:
"""Two dimensional buffer of shared memory chunks (queue_depth X num_minibatches),
chunks can be accessed either by providing two coordinates or via shm chunk's unique id.
Each ExternalSource callback gets its own buffer, first dimension is cycled
over when scheduling and receiving consecutive batches, second dimension is
used to separate minibatches."""
def __init__(self, shm_pool: List[BufShmChunk], queue_depth,
initial_chunk_capacity, num_minibatches):
if queue_depth < 1:
raise RuntimeError("Prefetch queue must have at least one element")
if initial_chunk_capacity < 1:
raise RuntimeError("Buffer chunk capacity must be a positive integer")
self.shm_pool = shm_pool
self.queue_depth = queue_depth
self.initial_chunk_capacity = align_up(initial_chunk_capacity,
SharedBatchWriter.BUFFER_ALIGNMENT)
self.num_minibatches = num_minibatches
self.chunks_ids_by_pos = []
for _ in range(self.queue_depth):
self.chunks_ids_by_pos.append(
[self.allocate_chunk(self.initial_chunk_capacity)
for _ in range(self.num_minibatches)])
self.chunks_ids = [chunk_id for dest_buf in self.chunks_ids_by_pos
for chunk_id in dest_buf]
def allocate_chunk(self, capacity):
chunk_id = len(self.shm_pool)
chunk = BufShmChunk.allocate(chunk_id, capacity)
self.shm_pool.append(chunk)
return chunk_id
def close_handles(self):
for shm_chunk_id in self.chunks_ids:
self.shm_pool[shm_chunk_id].close_handle()
def get_chunk_by_id(self, shm_chunk_id):
return self.shm_pool[shm_chunk_id]
def get_chunk_by_dest(self, dest_i, minibatch_i):
chunk_id = self.chunks_ids_by_pos[dest_i][minibatch_i]
return self.get_chunk_by_id(chunk_id)
def get_chunks(self):
return [self.get_chunk_by_id(chunk_id) for chunk_id in self.chunks_ids]
@property
def num_chunks(self):
return len(self.chunks_ids)
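# A sketch of the chunk-id layout produced by ShmChunkManager.__init__ above,
# assuming a manager that allocates from an empty shared pool (in a real
# pipeline, ids continue across managers, since the pool list is shared).
if __name__ == "__main__":
    queue_depth, num_minibatches = 3, 2
    chunks_ids_by_pos = [[dest_i * num_minibatches + minibatch_i
                          for minibatch_i in range(num_minibatches)]
                         for dest_i in range(queue_depth)]
    # get_chunk_by_dest(dest_i, minibatch_i) resolves through such a table
    assert chunks_ids_by_pos[1][0] == 2 and chunks_ids_by_pos[2][1] == 5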
class CallbackContext:
"""Keeps track of tasks and partially received results for a given source.
Contains source description, dedicated ShmChunkManager instance and
information about dedicated worker id if applicable."""
def __init__(self, source_desc: SourceDescription,
shm_manager: ShmChunkManager,
dedicated_worker_id: Optional[int]):
self.source_desc = source_desc
self.shm_manager = shm_manager
self.dedicated_worker_id = dedicated_worker_id
# counts all the batches ever scheduled for the given source, serves as
# id for the next scheduled tasks
self.scheduled_i = 0
# counts all the batches ever returned from the context, used to calculate position
# in the ShmChunkManager circular buffer for a next batch
self.produced_i = 0
self.epoch_start = self.scheduled_i
self.partially_received = {}
self.scheduled_minibatches = {}
self.iter_failed = {}
self.task_queue = deque()
self.epoch_synced = True
def reset(self):
"""Invalidates all batches pending in `task_queue`, marks the need to sync the epoch
before `ShmChunkManager` chunks occupied by `task_queue` batches can be be reused for
prefetching in the next epoch"""
self.epoch_start = self.scheduled_i
self.epoch_synced = False
def push_scheduled(self, num_minibatches):
scheduled_i = self.scheduled_i
self.scheduled_i += 1
self.partially_received[scheduled_i] = {}
self.scheduled_minibatches[scheduled_i] = num_minibatches
dest_chunk_i = (self.produced_i + self.scheduled_ahead) % self.queue_depth
self.task_queue.append(scheduled_i)
return scheduled_i, dest_chunk_i
def clear_scheduled(self, scheduled_i):
del self.partially_received[scheduled_i]
del self.scheduled_minibatches[scheduled_i]
if scheduled_i in self.iter_failed:
del self.iter_failed[scheduled_i]
def pop_scheduled(self):
return self.task_queue.popleft()
def handle_error(self, batch_i):
"""Check if given batch reported an error and raise it"""
if batch_i in self.iter_failed:
exception, traceback_str = self.iter_failed[batch_i]
try:
self.clear_scheduled(batch_i)
if isinstance(exception, StopIteration):
raise exception
else:
# Raise new exception propagating the traceback from worker thread as error
# message, originating from original exception
raise Exception("\n\nException traceback received from worker thread:\n\n"
+ traceback_str) from exception
finally:
                # Fix circular reference problem on StopIteration - the exception contains
                # a reference to the traceback that refers to a frame that contains local
                # variables, among them the exception itself.
                # This traceback is then chained into exceptions reraised along the way
                # (eventually at the pipeline level), which in effect introduces a reference to
                # the pipeline that would only be removed after a garbage collection round,
                # delaying finalization of the pool
del exception
def is_error(self, scheduled_i):
return scheduled_i in self.iter_failed
def process_task(self, shm_chunk: BufShmChunk, completed_task):
scheduled_i = completed_task.scheduled_i
if completed_task.is_failed():
if not self.is_error(scheduled_i):
self.set_error(completed_task)
self.mark_received(completed_task)
else:
if self.is_stale(scheduled_i) or self.is_error(scheduled_i):
self.mark_received(completed_task)
else:
self.receive_chunk(shm_chunk, completed_task)
def mark_received(self, completed_task):
scheduled_i = completed_task.scheduled_i
minibatch_i = completed_task.minibatch_i
self.partially_received[scheduled_i][minibatch_i] = None
def set_error(self, completed_task):
scheduled_i = completed_task.scheduled_i
self.iter_failed[scheduled_i] = (completed_task.exception, completed_task.traceback_str)
def is_stale(self, scheduled_i):
return scheduled_i < self.epoch_start
def is_not_received(self, scheduled_i):
return len(self.partially_received[scheduled_i]) < self.scheduled_minibatches[scheduled_i]
def coalesce_received(self, scheduled_i):
num_minibatches = self.scheduled_minibatches[scheduled_i]
minibatches = self.partially_received[scheduled_i]
if num_minibatches == 1:
return minibatches[0]
return [sample for minibatch_i in range(num_minibatches)
for sample in minibatches[minibatch_i]]
def take_processed(self, scheduled_i):
"""Return the full batch, mark it as cleared and consumed"""
batch = self.coalesce_received(scheduled_i)
self.clear_scheduled(scheduled_i)
self.produced_i += 1
return batch
def receive_chunk(self, shm_chunk, completed_task):
"""Obtain the chunk and decode it, add to partially gathered result"""
scheduled_i = completed_task.scheduled_i
minibatch_i = completed_task.minibatch_i
worker_batch = deserialize_batch(shm_chunk, completed_task.batch_meta)
self.partially_received[scheduled_i][minibatch_i] = worker_batch
@property
def queue_depth(self):
return self.shm_manager.queue_depth
@property
def scheduled_ahead(self):
        # at the beginning of a new epoch all previously scheduled tasks are discarded
if not self.epoch_synced:
return 0
return len(self.task_queue)
class WorkerContext:
def __init__(self,
source_descs: SourceDescription,
dedicated_task_queue: Optional[ShmQueue],
shm_chunks: List[BufShmChunk]):
self.source_descs = source_descs
self.dedicated_task_queue = dedicated_task_queue
self.shm_chunks = shm_chunks
def create_worker_contexts(mp, callback_contexts: List[CallbackContext],
num_workers, callback_pickler) -> List[WorkerContext]:
"""
    Prepares a list of `WorkerContext` instances.
    Each instance describes parameters specific to a given worker process (as opposed to
    parameters common to all processes in the pool).
    A WorkerContext contains the sources that the worker will receive and the shared memory
    chunks corresponding to those sources. It also contains a dedicated `ShmQueue` instance
    if any of the sources was assigned a dedicated worker.
"""
if callback_pickler is None:
source_descs = [cb_context.source_desc for cb_context in callback_contexts]
else:
source_descs = [copy.copy(cb_context.source_desc) for cb_context in callback_contexts]
for source_desc in source_descs:
source_desc.source = callback_pickler.dumps(source_desc.source)
general_cb_contexts = [
i for i, cb_context in enumerate(callback_contexts)
if cb_context.dedicated_worker_id is None]
worker_contexts = []
for worker_id in range(num_workers):
dedicated_cb_contexts = [i for i, cb_context in enumerate(callback_contexts)
if cb_context.dedicated_worker_id == worker_id]
worker_cb_contexts = general_cb_contexts + dedicated_cb_contexts
worker_sources = {i: source_descs[i] for i in worker_cb_contexts}
worker_shm_chunks = [shm_chunk for i in worker_cb_contexts
for shm_chunk in callback_contexts[i].shm_manager.get_chunks()]
if not dedicated_cb_contexts:
dedicated_task_queue = None
else:
# Each scheduled task has a shm chunk assigned for results, the number of
# scheduled tasks won't exceed the number of chunks available for results
dedicated_task_queue = ShmQueue(mp, capacity=sum(
callback_contexts[i].shm_manager.num_chunks
for i in dedicated_cb_contexts))
worker_context = WorkerContext(worker_sources, dedicated_task_queue, worker_shm_chunks)
worker_contexts.append(worker_context)
return worker_contexts
class ProcPool:
"""Runs pool of worker processes, stores pipes and sockets used to communicate with
the workers, starts thread keeping track of running processes and initializes communication.
"""
def __init__(self,
mp,
workers_contexts: List[WorkerContext],
result_queue: ShmQueue,
general_task_queue: Optional[ShmQueue],
callback_pickler):
start_method = mp.get_start_method()
if not workers_contexts:
raise RuntimeError("Cannot start a pool with no workers")
if start_method == 'fork' and _b.IsDriverInitialized():
raise RuntimeError(
"Error when starting Python worker threads for DALI parallel External Source. "
"Cannot fork a process when the CUDA has been initialized in the process. "
"CUDA is initialized during ``Pipeline.build()``, or can be initialized by another"
" library that interacts with CUDA, for example a DL framework creating "
"CUDA tensors. If you are trying to build multiple pipelines that use Python "
"workers, you will need to call ``start_py_workers`` method on all of them before "
"calling ``build`` method of any pipeline to start Python workers before CUDA is "
"initialized by ``build`` or other CUDA operation. Alternatively you can change "
"Python workers starting method from ``fork`` to ``spawn`` "
"(see DALI Pipeline's ``py_start_method`` option for details). ")
self._workers_contexts = workers_contexts
self._result_queue = result_queue
self._general_task_queue = general_task_queue
self._observer = None
self._processes = []
write_sockets = []
try:
for worker_i, worker_context in enumerate(workers_contexts):
if start_method == "fork":
read_socket = None
else:
read_socket, write_socket = socket.socketpair()
write_sockets.append(write_socket)
process_context = WorkerArgs(
worker_id=worker_i,
start_method=start_method,
source_descs=worker_context.source_descs,
shm_chunks=worker_context.shm_chunks,
general_task_queue=general_task_queue,
dedicated_task_queue=worker_context.dedicated_task_queue,
result_queue=result_queue, setup_socket=read_socket,
callback_pickler=callback_pickler
)
process = mp.Process(target=worker, args=(process_context,))
self._processes.append(process)
self._start_processes(mp, start_method, write_sockets)
finally:
for sock in write_sockets:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
@classmethod
def from_contexts(cls, contexts: List[CallbackContext], num_workers,
start_method="fork", py_callback_pickler=None):
mp = multiprocessing.get_context(start_method)
        # check if there are any sources without a dedicated worker id; if so,
        # the `general_task_queue` instance is needed to distribute tasks among all the workers
general_sources_buffs = [
context.shm_manager for context in contexts
if context.dedicated_worker_id is None]
if not general_sources_buffs:
general_task_queue = None
else:
# Each scheduled task has a shm chunk assigned for results, the number of
# scheduled tasks won't exceed the number of chunks available for results
general_task_queue = ShmQueue(mp, capacity=sum(
shm_manager.num_chunks for shm_manager in general_sources_buffs))
# Each computed minibatch makes for one message in the results queue, the number of
# messages won't exceed the number of shm chunks available to store the minibatches
# in all the `ShmChunkManager` instances.
scheduled_tasks_upper_bound = sum(context.shm_manager.num_chunks for context in contexts)
        # ensure enough space for messages sent to confirm initialization of the workers
result_queue_capacity = max(scheduled_tasks_upper_bound, num_workers)
result_queue = ShmQueue(mp, capacity=result_queue_capacity)
callback_pickler = None \
if start_method == "fork" else pickling._CustomPickler.create(py_callback_pickler)
worker_contexts = create_worker_contexts(mp, contexts, num_workers, callback_pickler)
instance = None
try:
instance = cls(mp, worker_contexts, result_queue, general_task_queue, callback_pickler)
if general_task_queue is not None:
general_task_queue.close_handle()
result_queue.close_handle()
for worker_context in worker_contexts:
if worker_context.dedicated_task_queue is not None:
worker_context.dedicated_task_queue.close_handle()
return instance
except: # noqa: E722
if instance is not None:
instance.close()
raise
@property
def num_workers(self):
return len(self._workers_contexts)
def pids(self):
"""Get pids of the processes started by this pool.
"""
return [proc.pid for proc in self._processes]
def close(self):
if self._observer is None:
return
self._observer.close()
self._observer = None
def wait_for_res(self):
if self._observer is None:
raise RuntimeError("Cannot receive data from the pool that has been closed")
return self._result_queue.get(None)
def send(self, tasks: List[Tuple[BufShmChunk, Any]], dedicated_worker_id):
if self._observer is None:
raise RuntimeError("Cannot send tasks to the pool that has been closed")
shm_msg_descs = [write_shm_message(-1, shm_chunk, msg, 0, resize=False)
for shm_chunk, msg in tasks]
if dedicated_worker_id is None:
if self._general_task_queue.put(shm_msg_descs) is None:
raise RuntimeError("Sending tasks to workers failed")
else:
worker_ctx = self._workers_contexts[dedicated_worker_id]
if worker_ctx.dedicated_task_queue.put(shm_msg_descs) is None:
raise RuntimeError("Sending tasks to worker {} failed".format(dedicated_worker_id))
def _sync_initialized_workers(self):
workers_received = []
while len(workers_received) < self.num_workers:
shm_msgs = self.wait_for_res()
if shm_msgs is None:
raise RuntimeError("Workers initialization failed")
synced_ids = [shm_msg.worker_id for shm_msg in shm_msgs]
assert all(0 <= worker_id < self.num_workers and worker_id not in workers_received
for worker_id in synced_ids)
workers_received.extend(synced_ids)
def _send_queue_handles(self, write_sockets):
pid = os.getppid()
all_worker_queues = [self._result_queue]
if self._general_task_queue is not None:
all_worker_queues.append(self._general_task_queue)
for queue in all_worker_queues:
for sock in write_sockets:
multiprocessing.reduction.send_handle(sock, queue.shm.handle, pid)
for sock, worker_context in zip(write_sockets, self._workers_contexts):
if worker_context.dedicated_task_queue is not None:
multiprocessing.reduction.send_handle(
sock, worker_context.dedicated_task_queue.shm.handle, pid)
def _send_shm_handles(self, socks):
pid = os.getppid()
for sock, worker_context in zip(socks, self._workers_contexts):
for shm_chunk in worker_context.shm_chunks:
multiprocessing.reduction.send_handle(sock, shm_chunk.handle, pid)
def _start_processes(self, mp, start_method, write_sockets):
try:
for process in self._processes:
process.start()
task_queues = [
worker_context.dedicated_task_queue
for worker_context in self._workers_contexts
if worker_context.dedicated_task_queue is not None]
if self._general_task_queue is not None:
task_queues.append(self._general_task_queue)
self._observer = Observer(mp, self._processes, task_queues, self._result_queue)
if start_method != "fork":
# NOTE when making any changes here, make sure to reflect them in the worker
# process, so that it sets received handles to objects in the same order
self._send_queue_handles(write_sockets)
self._send_shm_handles(write_sockets)
self._sync_initialized_workers()
except: # noqa: E722
if self._observer is not None:
self._observer.close()
self._observer = None
else:
for proc in self._processes:
if proc.is_alive():
proc.terminate()
for proc in self._processes:
if proc.pid is not None:
proc.join()
raise
class Observer:
"""
    Closes the whole pool of worker processes if any of the processes exits. The processes can
    also be closed from the main process by calling the observer's `close` method.
    Parameters
    ----------
    `mp` : Python's multiprocessing context (depending on the start method used: `spawn` or `fork`)
    `processes` : List of multiprocessing Process instances
    `task_queues` : List[ShmQueue]
        Queues that worker processes take tasks from. If the `close` method is called and none of
        the processes exited abruptly so far, the queues will be used to notify the workers about
        closing to let the workers gracefully exit.
    `result_queue` : ShmQueue
        Queue where worker processes report completed tasks. It gets closed along with the worker
        processes, to prevent the main process from blocking on waiting for results from the
        workers.
"""
def __init__(self, mp, processes, task_queues, result_queue):
self._interruption_pipe, self.interrupt_pipe = mp.Pipe(duplex=False)
self._processes = processes
self._task_queues = task_queues
self._result_queue = result_queue
self.thread = threading.Thread(target=self._observer_thread, daemon=True)
self.thread.start()
def _observer_thread(self):
"""Observer thread for ProcPool used for stopping and joining processes.
"""
exit_gently = True
try:
ps = {p.sentinel: p for p in self._processes}
listen_for = list(ps.keys()) + [self._interruption_pipe]
# Once one process exits stop the whole group (gracefully if possible)
while True:
sentinels = multiprocessing.connection.wait(listen_for)
proc_sentinels = [s for s in sentinels if s != self._interruption_pipe]
if self._interruption_pipe in sentinels:
break
if any(ps[sentinel].exitcode is not None for sentinel in proc_sentinels):
exit_gently = False
break
except: # noqa: E722
exit_gently = False
raise
finally:
self._result_queue.close() # main thread may be blocked on reading the queue
if exit_gently:
                # try to close task queues and notify waiting processes, so that they can
                # clean up and exit. Unfortunately, if all workers exited abruptly when waiting,
                # an attempt to notify workers with multiprocessing.Condition might lead to a
                # deadlock on the underlying semaphore. For this reason, it is done only if
                # none of the workers reported to have exited.
for queue in self._task_queues:
queue.close()
for proc in self._processes:
proc.join(1)
for proc in self._processes:
if proc.exitcode is None:
proc.terminate()
proc.join()
def close(self):
if self.thread is not None:
# send anything via interruption_pipe to notify observer thread about closing
self.interrupt_pipe.send(None)
self.thread.join()
self.thread = None
def create_shm_chunk_manager_for_group(group, shm_pool, keep_alive_queue_size,
min_initial_chunk_size, num_workers,
batch_size=None):
num_mini_batches = 1 if group.batch else num_workers
if group.bytes_per_sample_hint is None or batch_size is None:
initial_chunk_size = min_initial_chunk_size
else:
num_samples_per_mini_batch = (batch_size + num_mini_batches - 1) // num_mini_batches
initial_chunk_size = num_samples_per_mini_batch * group.bytes_per_sample_hint
initial_chunk_size = max(min_initial_chunk_size, initial_chunk_size)
return ShmChunkManager(
shm_pool,
keep_alive_queue_size + group.prefetch_queue_depth,
initial_chunk_size,
num_mini_batches)
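# A sketch of the initial-capacity estimate above: in sample mode a batch is
# split into one minibatch per worker, so with a 256-sample batch, 4 workers
# and a 1 KiB per-sample hint, each chunk starts at 64 KiB (floored at
# min_initial_chunk_size and aligned up in the real code).
if __name__ == "__main__":
    batch_size, num_mini_batches, bytes_per_sample_hint = 256, 4, 1024
    num_samples_per_mini_batch = (batch_size + num_mini_batches - 1) // num_mini_batches
    assert num_samples_per_mini_batch * bytes_per_sample_hint == 64 * 1024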
class WorkerPool:
""""Combines worker processes pool with callback contexts, can be used to schedule batches
to be run on the workers and to receive resulting batches from the workers."""
def __init__(self, contexts: List[CallbackContext], pool: ProcPool):
"""
Parameters
----------
`contexts` : List[CallbackContext]
List of callbacks' contexts to be handled by the Worker.
`pool` : ProcPool
ProcPool instance enabling basic communication with worker processes, it should be
initialized with `contexts`.
"""
self.contexts = contexts
self.pool = pool
# shm chunks ids must be unique across the pool and each chunk must belong to
# exactly one context.
# Thanks to that callback context can be identified by the id of shm chunk.
self.shm_chunks_contexts = {
chunk_id: context
for context in self.contexts
for chunk_id in context.shm_manager.chunks_ids}
@classmethod
def from_groups(
cls, groups, keep_alive_queue_size, batch_size=None, start_method="fork",
num_workers=1, min_initial_chunk_size=1024 * 1024, py_callback_pickler=None):
"""Creates new WorkerPool instance for given list of ExternalSource groups.
Parameters
----------
`groups` : _ExternalSourceGroup list
List of external source groups.
`keep_alive_queue_size` : int
            Number of the most recently produced batches whose underlying shared memory should
            remain untouched (because they might still be referenced further in the pipeline).
            Note that the actual number of simultaneously kept batches will be greater by the
            length of the parallel external source prefetching queue, which is at least one.
`batch_size` : int, optional
Maximal batch size. For now, used only to estimate initial capacity of virtual
memory slots.
`start_method` : str
Method of starting worker processes, either fork or spawn.
`num_workers` : int
Number of workers to be created in ProcPool.
`min_initial_chunk_size` : int
Minimal initial size of each shared memory chunk.
            NOTE it must be big enough to accommodate a serialized `ScheduledTask` instance.
"""
import_numpy()
if len(groups) == 0:
raise RuntimeError("Cannot create Python workers pool because"
" there are no callbacks provided")
if num_workers < 1:
raise RuntimeError("Number of Python workers for parallel"
" ExternalSource must be positive")
if any(group.source_desc.kind != SourceKind.CALLABLE and not group.batch
for group in groups):
raise RuntimeError("Parallel external source with iterator"
" or generator must run in batch mode")
# iterators and generators are stateful and run always in the same dedicated worker
num_cbs_dedicated = sum(cls.is_iterable_group(group) for group in groups)
num_cbs_general = len(groups) - num_cbs_dedicated
if num_cbs_general == 0:
if num_workers > num_cbs_dedicated:
warn_args = (num_cbs_dedicated, "s" if num_cbs_dedicated > 1 else "", num_workers)
warnings.warn("There will be run only {} python worker{}, even though {} were"
" specified to run. This may happen when all your ExternalSource"
" callbacks are stateful (for instance they are iterators) and there"
" is less of them than ```py_num_workers```".format(*warn_args),
Warning)
num_workers = num_cbs_dedicated
source_descs = [group.source_desc for group in groups]
dedicated_workers = cls.assign_dedicated_workers(groups, num_workers)
# common list for all the chunks allocated by ShmChunkManagers
# of all sources in the pipeline
shm_pool = []
shm_managers = [
create_shm_chunk_manager_for_group(
group, shm_pool, keep_alive_queue_size,
min_initial_chunk_size, num_workers, batch_size)
for group in groups]
contexts = [
CallbackContext(source_desc, shm_manager, dedicated_worker_id)
for source_desc, shm_manager, dedicated_worker_id
in zip(source_descs, shm_managers, dedicated_workers)]
pool = None
try:
pool = ProcPool.from_contexts(contexts, num_workers, start_method, py_callback_pickler)
# close underlying file descriptors that are not needed anymore once
# passed to the workers processes
for context in contexts:
context.shm_manager.close_handles()
return cls(contexts, pool)
except: # noqa: E722
if pool is not None:
pool.close()
raise
@classmethod
def assign_dedicated_workers(cls, groups, num_workers):
def get_next_dedicated_worker():
next_dedicated_worker = num_workers - 1
while True:
next_dedicated_worker = (next_dedicated_worker + 1) % num_workers
yield next_dedicated_worker
next_dedicated_worker = get_next_dedicated_worker()
return [next(next_dedicated_worker) if cls.is_iterable_group(group) else None
for group in groups]
@classmethod
def is_iterable_group(cls, group):
return group.source_desc.kind != SourceKind.CALLABLE
def schedule_batch(self, context_i, work_batch: TaskArgs):
"""Distribute `work_batch` among workers.
Parameters
----------
`context_i` : int
Specifies which callback will be used to run the task, it must be the index
corresponding to the order of callbacks passed when constructing WorkerPool.
`work_batch` : TaskArgs
Wrapper around parameters produced by the ExternalSource describing the next batch.
"""
context = self.contexts[context_i]
if not context.epoch_synced:
self._sync_and_discard(context_i)
context.epoch_synced = True
if context.iter_failed:
            # there is no point in scheduling anything for a context that has reached the end of
            # data or failed with an error; once the user receives the batch that raised the
            # exception, they should reset the context before scheduling new tasks
return False
minibatches = self._split_work(work_batch)
num_minibatches = len(minibatches)
assert num_minibatches <= context.shm_manager.num_minibatches
scheduled_i, dst_chunk_i = context.push_scheduled(num_minibatches)
self._distribute(context_i, scheduled_i, dst_chunk_i, minibatches)
return True
def _split_work(self, work_batch: TaskArgs):
if not work_batch.is_sample_mode():
return [work_batch]
num_minibatches = self.pool.num_workers
sample_range = work_batch.sample_range
samples_num = len(sample_range)
chunk_size = samples_num // num_minibatches
remainder = samples_num % num_minibatches
queued_no = 0
minibatches = []
for minibatch_i in range(num_minibatches):
worker_chunk = chunk_size + (minibatch_i < remainder)
if worker_chunk == 0:
break
sample_slice = sample_range[queued_no:queued_no + worker_chunk]
minibatch = TaskArgs(minibatch_i, sample_range=sample_slice)
minibatches.append(minibatch)
queued_no += worker_chunk
return minibatches
def _distribute(self, context_i, scheduled_i, dst_chunk_i, minibatches):
context = self.contexts[context_i]
scheduled_tasks = [(
context.shm_manager.get_chunk_by_dest(dst_chunk_i, minibatch_i),
ScheduledTask(context_i, scheduled_i, context.epoch_start, task))
for minibatch_i, task in enumerate(minibatches)
]
dedicated_worker_id = context.dedicated_worker_id
self.pool.send(scheduled_tasks, dedicated_worker_id)
def _sync_and_discard(self, context_i):
context = self.contexts[context_i]
assert not context.epoch_synced
while context.task_queue:
try:
batch = self.receive_batch(context_i)
assert batch is None
except StopIteration:
pass
def receive_batch(self, context_i):
"""Returns the next produced batch (in the order of schedule_batch calls) for the
        ``context_i``th callback.
        Parameters
        ----------
        `context_i` : int
            Specifies which callback you want the results from; the ordering corresponds to the
            order of callbacks passed when constructing the pool.
"""
context = self.contexts[context_i]
assert len(context.task_queue) > 0, "No task has been scheduled"
scheduled_i = context.pop_scheduled()
while context.is_not_received(scheduled_i):
self._receive_chunk()
context.handle_error(scheduled_i)
if context.is_stale(scheduled_i):
context.clear_scheduled(scheduled_i)
return None
res = context.take_processed(scheduled_i)
return res
def _receive_chunk(self):
completed_tasks_meta = self.pool.wait_for_res()
if completed_tasks_meta is None:
raise RuntimeError("Worker data receiving interrupted")
for completed_task_meta in completed_tasks_meta:
context = self.shm_chunks_contexts[completed_task_meta.shm_chunk_id]
shm_chunk = context.shm_manager.get_chunk_by_id(completed_task_meta.shm_chunk_id)
completed_task = read_shm_message(shm_chunk, completed_task_meta)
context.process_task(shm_chunk, completed_task)
def pids(self):
"""Get pids of the processes started by this pool.
"""
return self.pool.pids()
def reset(self):
for context in self.contexts:
context.reset()
def reset_context(self, context_i):
self.contexts[context_i].reset()
def close(self):
self.pool.close()
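# A standalone sketch of the WorkerPool._split_work arithmetic above:
# 10 samples over 4 workers give minibatches of sizes 3, 3, 2, 2 (the
# remainder goes to the first workers); zero-sized trailing minibatches
# would not be scheduled at all.
if __name__ == "__main__":
    samples_num, num_minibatches = 10, 4
    chunk_size, remainder = divmod(samples_num, num_minibatches)
    sizes = [chunk_size + (minibatch_i < remainder)
             for minibatch_i in range(num_minibatches)]
    sizes = [size for size in sizes if size > 0]
    assert sizes == [3, 3, 2, 2] and sum(sizes) == samples_num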
|
DALI-main
|
dali/python/nvidia/dali/_multiproc/pool.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
class Structure:
"""
Utility around Python `struct` module (https://docs.python.org/3.6/library/struct.html)
that allows accessing and modifying `_fields` like ordinary object attributes
and read/write their values from/into the buffer in C struct like format.
A similar approach of declaring `_fields_` with corresponding C types can be found in the
Python `ctypes` module (https://docs.python.org/3/library/ctypes.html).
"""
# A tuple of (name, type) pairs, where type is a string encoding of a simple type,
# as used in struct
_fields = tuple()
def __init__(self, *values):
self.setup_struct()
self.set_values(*values)
@classmethod
def setup_struct(cls):
if '_struct_desc' not in cls.__dict__:
cls._struct_desc = "@" + "".join(field_type for _, field_type in cls._fields)
cls._struct = struct.Struct(cls._struct_desc)
def __getstate__(self):
return self.__dict__.copy()
def __setstate__(self, state):
self.__dict__.update(state)
self.setup_struct()
def set_values(self, *values):
for (field_name, _), value in zip(self._fields, values):
setattr(self, field_name, value)
def get_values(self):
return tuple(getattr(self, field_name) for field_name, _ in self._fields)
def pack_into(self, buf, offset):
try:
values = self.get_values()
return self._struct.pack_into(buf, offset, *values)
except struct.error as e:
raise RuntimeError(
"Failed to serialize object as C-like structure. "
"Tried to populate following fields: `{}` with respective values: `{}` ".format(
self._fields, self.get_values())) from e
def unpack_from(self, buf, offset):
values = self._struct.unpack_from(buf, offset)
self.set_values(*values)
return self
def get_size(self):
return self._struct.size
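# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal round trip with a hypothetical subclass; the field names and
# struct type codes ("i" = int, "q" = long long) are assumptions chosen
# purely for illustration.
class _ExampleMeta(Structure):
    _fields = (("chunk_id", "i"), ("offset", "q"))

def _example_round_trip():
    buf = bytearray(_ExampleMeta(7, 4096).get_size())
    _ExampleMeta(7, 4096).pack_into(buf, 0)
    return _ExampleMeta(0, 0).unpack_from(buf, 0).get_values()  # (7, 4096)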
|
DALI-main
|
dali/python/nvidia/dali/_multiproc/struct_message.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
from nvidia.dali import fn
from nvidia.dali import types
from nvidia.dali.auto_aug import augmentations as a
from nvidia.dali.auto_aug.core import _Augmentation, Policy, signed_bin
from nvidia.dali.auto_aug.core._args import forbid_unused_kwargs as _forbid_unused_kwargs
from nvidia.dali.auto_aug.core._utils import \
get_translations as _get_translations, \
pretty_select as _pretty_select
from nvidia.dali.data_node import DataNode as _DataNode
try:
import numpy as np
except ImportError:
raise RuntimeError(
"Could not import numpy. DALI's automatic augmentation examples depend on numpy. "
"Please install numpy to use the examples.")
def auto_augment(
data: _DataNode,
policy_name: str = 'image_net',
shape: Optional[Union[_DataNode, Tuple[int, int]]] = None,
fill_value: Optional[int] = 128,
interp_type: Optional[types.DALIInterpType] = None,
max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None,
seed: Optional[int] = None,
) -> _DataNode:
"""
Applies one of the predefined policies from the AutoAugment
paper (https://arxiv.org/abs/1805.09501) to the provided batch of samples.
Args
----
data : DataNode
A batch of samples to be processed. The supported samples are images
of `HWC` layout and videos of `FHWC` layout, the supported data type is `uint8`.
policy_name : str, optional
The name of predefined policy. Acceptable values are: `image_net`,
`reduced_image_net`, `svhn`, `reduced_cifar10`. Defaults to `image_net`.
shape: DataNode or Tuple[int, int], optional
The size (height and width) of the image or frames in the video sequence
passed as the `data`. If specified, the magnitude of `translation` operations
depends on the image/frame shape and spans from 0 to `max_translate_rel * shape`.
Otherwise, the magnitude range is `[0, max_translate_abs]` for any sample.
fill_value: int, optional
A value to be used as a padding for images/frames transformed with warp_affine ops
(translation, shear and rotate). If `None` is specified, the images/frames are padded
with the border value repeated (clamped).
interp_type: types.DALIInterpType, optional
Interpolation method used by the warp_affine ops (translation, shear and rotate).
Supported values are `types.INTERP_LINEAR` (default) and `types.INTERP_NN`.
max_translate_abs: int or (int, int), optional
Only valid when `shape` is not provided. Specifies the maximal shift (in pixels)
in the translation augmentation. If a tuple is specified, the first component limits
height, the second the width. Defaults to 250, which means the maximal magnitude
shifts the image by 250 pixels.
max_translate_rel: float or (float, float), optional
Only valid when `shape` argument is provided. Specifies the maximal shift as a
fraction of image shape in the translation augmentations.
If a tuple is specified, the first component limits the height, the second the width.
Defaults to 1, which means the maximal magnitude shifts the image entirely out of
the canvas.
seed: int, optional
Seed to be used to randomly sample operations (and to negate magnitudes).
Returns
-------
DataNode
A batch of transformed samples.
"""
predefined_policies = {
'image_net': get_image_net_policy,
'reduced_image_net': get_reduced_image_net_policy,
'svhn': get_svhn_policy,
'reduced_cifar10': get_reduced_cifar10_policy,
}
policies_without_translation = ('reduced_image_net', )
shape_related_args = (
(shape, 'shape'),
(max_translate_abs, 'max_translate_abs'),
(max_translate_rel, 'max_translate_rel'),
)
if not isinstance(policy_name, str) or policy_name not in predefined_policies:
policies_str = ", ".join([f"`{name}`" for name in predefined_policies.keys()])
raise Exception(
f"The `policy_name` must be a string that takes one of the values: {policies_str}")
if policy_name in policies_without_translation:
shape_arg = next((name for arg, name in shape_related_args if arg is not None), None)
if shape_arg is not None:
raise Exception(
f"The policy `{policy_name}` does not contain any augmentations that rely on the "
f"image shape. The `{shape_arg}` argument must not be specified in that case.")
aug_kwargs = {"fill_value": fill_value, "interp_type": interp_type}
use_shape = shape is not None
if use_shape:
aug_kwargs["shape"] = shape
if policy_name in policies_without_translation:
policy = predefined_policies[policy_name]()
else:
policy = predefined_policies[policy_name](use_shape=use_shape,
max_translate_abs=max_translate_abs,
max_translate_rel=max_translate_rel)
return apply_auto_augment(policy, data, seed, **aug_kwargs)
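# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal pipeline applying the predefined ImageNet policy; `image_dir`
# is a placeholder for a directory with images.
def _example_auto_augment_pipeline(image_dir, batch_size=32):
    from nvidia.dali import pipeline_def

    @pipeline_def(batch_size=batch_size, device_id=0, num_threads=4, seed=42)
    def pipe():
        jpegs, _ = fn.readers.file(file_root=image_dir, random_shuffle=True)
        images = fn.decoders.image(jpegs, device="mixed", output_type=types.RGB)
        return auto_augment(images, policy_name="image_net")

    return pipe()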
def auto_augment_image_net(
data: _DataNode,
shape: Optional[Union[_DataNode, Tuple[int, int]]] = None,
fill_value: Optional[int] = 128,
interp_type: Optional[types.DALIInterpType] = None,
max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None,
seed: Optional[int] = None,
) -> _DataNode:
"""
Applies `image_net_policy` in AutoAugment (https://arxiv.org/abs/1805.09501)
fashion to the provided batch of samples.
Equivalent to :meth:`~nvidia.dali.auto_aug.auto_augment.auto_augment` call with ``policy_name``
specified to ``'image_net'``.
See :meth:`~nvidia.dali.auto_aug.auto_augment.auto_augment` function for details.
"""
return auto_augment(data, "image_net", shape, fill_value, interp_type, max_translate_abs,
max_translate_rel, seed)
def apply_auto_augment(policy: Policy, data: _DataNode, seed: Optional[int] = None,
**kwargs) -> _DataNode:
"""
Applies AutoAugment (https://arxiv.org/abs/1805.09501) augmentation scheme to the
provided batch of samples.
Args
----
policy: Policy
Set of sequences of augmentations to be applied in AutoAugment fashion.
data : DataNode
A batch of samples to be processed.
seed: int, optional
Seed to be used to randomly sample operations (and to negate magnitudes).
kwargs:
A dictionary of extra parameters to be passed when calling augmentations.
The signature of each augmentation is checked for any extra arguments and if
the name of the argument matches one from the `kwargs`, the value is
passed as an argument. For example, some augmentations from the default
AutoAugment suite accept ``shape``, ``fill_value`` and ``interp_type``.
Returns
-------
DataNode
A batch of transformed samples.
"""
if len(policy.sub_policies) == 0:
raise Exception(f"Cannot run empty policy. Got {policy} in `apply_auto_augment` call.")
max_policy_len = max(len(sub_policy) for sub_policy in policy.sub_policies)
should_run = fn.random.uniform(range=[0, 1], shape=(max_policy_len, ), dtype=types.FLOAT,
seed=seed)
sub_policy_id = fn.random.uniform(values=list(range(len(policy.sub_policies))), seed=seed,
dtype=types.INT32)
run_probabilities = _sub_policy_to_probability_map(policy)[sub_policy_id]
magnitude_bins = _sub_policy_to_magnitude_bin_map(policy)[sub_policy_id]
aug_ids, augmentations = _sub_policy_to_augmentation_map(policy)
aug_ids = aug_ids[sub_policy_id]
if any(aug.randomly_negate for aug in policy.augmentations.values()):
magnitude_bins = signed_bin(magnitude_bins, seed=seed, shape=(max_policy_len, ))
_forbid_unused_kwargs(policy.augmentations.values(), kwargs, 'apply_auto_augment')
for stage_id in range(max_policy_len):
if should_run[stage_id] < run_probabilities[stage_id]:
op_kwargs = dict(data=data, magnitude_bin=magnitude_bins[stage_id],
num_magnitude_bins=policy.num_magnitude_bins, **kwargs)
data = _pretty_select(augmentations[stage_id], aug_ids[stage_id], op_kwargs,
auto_aug_name='apply_auto_augment',
ref_suite_name='get_image_net_policy')
return data
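# --- Editor's illustrative sketch (not part of the original file) ---
# Running a small custom policy with `apply_auto_augment`; the sub-policies
# below are made up for illustration and are not a tuned policy.
def _example_custom_policy(data):
    rotate = a.rotate.augmentation((0, 30), True)
    solarize = a.solarize.augmentation((0, 256), False)
    policy = Policy(name="TinyPolicy", num_magnitude_bins=11, sub_policies=[
        [(rotate, 0.7, 5), (a.equalize, 0.3, None)],
        [(solarize, 0.5, 8)],
    ])
    return apply_auto_augment(policy, data, seed=42)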
def get_image_net_policy(use_shape: bool = False, max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None) -> Policy:
"""
Creates augmentation policy tuned for the ImageNet as described in
AutoAugment paper (https://arxiv.org/abs/1805.09501).
The returned policy can be run with
:meth:`~nvidia.dali.auto_aug.auto_augment.apply_auto_augment`.
Args
----
use_shape : bool
If true, the translation offset is computed as a percentage of the image/frame shape.
Useful if the samples processed with the auto augment have different shapes.
If false, the offsets range is bounded by a constant (`max_translate_abs`).
max_translate_abs: int or (int, int), optional
Only valid with use_shape=False, specifies the maximal shift (in pixels) in the translation
augmentations. If a tuple is specified, the first component limits height, the second the
width. Defaults to 250.
max_translate_rel: float or (float, float), optional
Only valid with use_shape=True, specifies the maximal shift as a fraction of image/frame
shape in the translation augmentations. If a tuple is specified, the first component limits
height, the second the width. Defaults to 1.
"""
default_translate_abs, default_translate_rel = 250, 1.
_, translate_y = _get_translations(use_shape, default_translate_abs, default_translate_rel,
max_translate_abs, max_translate_rel)
shear_x = a.shear_x.augmentation((0, 0.3), True)
shear_y = a.shear_y.augmentation((0, 0.3), True)
rotate = a.rotate.augmentation((0, 30), True)
color = a.color.augmentation((0.1, 1.9), False, None)
posterize = a.posterize.augmentation((0, 4), False, a.poster_mask_uint8)
solarize = a.solarize.augmentation((0, 256), False)
solarize_add = a.solarize_add.augmentation((0, 110), False)
invert = a.invert
equalize = a.equalize
auto_contrast = a.auto_contrast
return Policy(
name="ImageNetPolicy", num_magnitude_bins=11, sub_policies=[
[(equalize, 0.8, None), (shear_y, 0.8, 4)],
[(color, 0.4, 9), (equalize, 0.6, None)],
[(color, 0.4, 1), (rotate, 0.6, 8)],
[(solarize, 0.8, 3), (equalize, 0.4, None)],
[(solarize, 0.4, 2), (solarize, 0.6, 2)],
[(color, 0.2, 0), (equalize, 0.8, None)],
[(equalize, 0.4, None), (solarize_add, 0.8, 3)],
[(shear_x, 0.2, 9), (rotate, 0.6, 8)],
[(color, 0.6, 1), (equalize, 1.0, None)],
[(invert, 0.4, None), (rotate, 0.6, 0)],
[(equalize, 1.0, None), (shear_y, 0.6, 3)],
[(color, 0.4, 7), (equalize, 0.6, None)],
[(posterize, 0.4, 6), (auto_contrast, 0.4, None)],
[(solarize, 0.6, 8), (color, 0.6, 9)],
[(solarize, 0.2, 4), (rotate, 0.8, 9)],
[(rotate, 1.0, 7), (translate_y, 0.8, 9)],
[(solarize, 0.8, 4)],
[(shear_y, 0.8, 0), (color, 0.6, 4)],
[(color, 1.0, 0), (rotate, 0.6, 2)],
[(equalize, 0.8, None)],
[(equalize, 1.0, None), (auto_contrast, 0.6, None)],
[(shear_y, 0.4, 7), (solarize_add, 0.6, 7)],
[(posterize, 0.8, 2), (solarize, 0.6, 10)],
[(solarize, 0.6, 8), (equalize, 0.6, None)],
[(color, 0.8, 6), (rotate, 0.4, 5)],
])
def get_reduced_cifar10_policy(use_shape: bool = False, max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None) -> Policy:
"""
Creates augmentation policy tuned with the reduced CIFAR-10 as described
in AutoAugment paper (https://arxiv.org/abs/1805.09501).
The returned policy can be run with
:meth:`~nvidia.dali.auto_aug.auto_augment.apply_auto_augment`.
Args
----
use_shape : bool
If true, the translation offset is computed as a percentage of the image/frame shape.
Useful if the samples processed with the auto augment have different shapes.
If false, the offsets range is bounded by a constant (`max_translate_abs`).
max_translate_abs: int or (int, int), optional
Only valid with use_shape=False, specifies the maximal shift (in pixels) in the translation
augmentations. If a tuple is specified, the first component limits height, the second the
width. Defaults to 250.
max_translate_rel: float or (float, float), optional
Only valid with use_shape=True, specifies the maximal shift as a fraction of image/frame
shape in the translation augmentations. If a tuple is specified, the first component limits
height, the second the width. Defaults to 1.
"""
default_translate_abs, default_translate_rel = 250, 1.
translate_x, translate_y = _get_translations(use_shape, default_translate_abs,
default_translate_rel, max_translate_abs,
max_translate_rel)
shear_y = a.shear_y.augmentation((0, 0.3), True)
rotate = a.rotate.augmentation((0, 30), True)
brightness = a.brightness.augmentation((0.1, 1.9), False, None)
color = a.color.augmentation((0.1, 1.9), False, None)
contrast = a.contrast.augmentation((0.1, 1.9), False, None)
sharpness = a.sharpness.augmentation((0.1, 1.9), False, a.sharpness_kernel_shifted)
posterize = a.posterize.augmentation((0, 4), False, a.poster_mask_uint8)
solarize = a.solarize.augmentation((0, 256), False)
invert = a.invert
equalize = a.equalize
auto_contrast = a.auto_contrast
return Policy(
name="ReducedCifar10Policy", num_magnitude_bins=11, sub_policies=[
[(invert, 0.1, None), (contrast, 0.2, 6)],
[(rotate, 0.7, 2), (translate_x, 0.3, 9)],
[(sharpness, 0.8, 1), (sharpness, 0.9, 3)],
[(shear_y, 0.5, 8), (translate_y, 0.7, 9)],
[(auto_contrast, 0.5, None), (equalize, 0.9, None)],
[(shear_y, 0.2, 7), (posterize, 0.3, 7)],
[(color, 0.4, 3), (brightness, 0.6, 7)],
[(sharpness, 0.3, 9), (brightness, 0.7, 9)],
[(equalize, 0.6, None), (equalize, 0.5, None)],
[(contrast, 0.6, 7), (sharpness, 0.6, 5)],
[(color, 0.7, 7), (translate_x, 0.5, 8)],
[(equalize, 0.3, None), (auto_contrast, 0.4, None)],
[(translate_y, 0.4, 3), (sharpness, 0.2, 6)],
[(brightness, 0.9, 6), (color, 0.2, 8)],
[(solarize, 0.5, 2)],
[(equalize, 0.2, None), (auto_contrast, 0.6, None)],
[(equalize, 0.2, None), (equalize, 0.6, None)],
[(color, 0.9, 9), (equalize, 0.6, None)],
[(auto_contrast, 0.8, None), (solarize, 0.2, 8)],
[(brightness, 0.1, 3), (color, 0.7, 0)],
[(solarize, 0.4, 5), (auto_contrast, 0.9, None)],
[(translate_y, 0.9, 9), (translate_y, 0.7, 9)],
[(auto_contrast, 0.9, None), (solarize, 0.8, 3)],
[(equalize, 0.8, None), (invert, 0.1, None)],
[(translate_y, 0.7, 9), (auto_contrast, 0.9, None)],
])
def get_svhn_policy(use_shape: bool = False, max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None) -> Policy:
"""
Creates augmentation policy tuned with the SVHN as described
in AutoAugment paper (https://arxiv.org/abs/1805.09501).
The returned policy can be run with
:meth:`~nvidia.dali.auto_aug.auto_augment.apply_auto_augment`.
Args
----
use_shape : bool
If true, the translation offset is computed as a percentage of the image/frame shape.
Useful if the samples processed with the auto augment have different shapes.
If false, the offsets range is bounded by a constant (`max_translate_abs`).
max_translate_abs: int or (int, int), optional
Only valid with use_shape=False, specifies the maximal shift (in pixels) in the translation
augmentations. If a tuple is specified, the first component limits height, the second the
width. Defaults to 250.
max_translate_rel: float or (float, float), optional
Only valid with use_shape=True, specifies the maximal shift as a fraction of image/frame
shape in the translation augmentations. If a tuple is specified, the first component limits
height, the second the width. Defaults to 1.
"""
default_translate_abs, default_translate_rel = 250, 1.
translate_x, translate_y = _get_translations(use_shape, default_translate_abs,
default_translate_rel, max_translate_abs,
max_translate_rel)
shear_x = a.shear_x.augmentation((0, 0.3), True)
shear_y = a.shear_y.augmentation((0, 0.3), True)
rotate = a.rotate.augmentation((0, 30), True)
contrast = a.contrast.augmentation((0.1, 1.9), False, None)
solarize = a.solarize.augmentation((0, 256), False)
invert = a.invert
equalize = a.equalize
auto_contrast = a.auto_contrast
return Policy(
name="SvhnPolicy", num_magnitude_bins=11, sub_policies=[
[(shear_x, 0.9, 4), (invert, 0.2, None)],
[(shear_y, 0.9, 8), (invert, 0.7, None)],
[(equalize, 0.6, None), (solarize, 0.6, 6)],
[(invert, 0.9, None), (equalize, 0.6, None)],
[(equalize, 0.6, None), (rotate, 0.9, 3)],
[(shear_x, 0.9, 4), (auto_contrast, 0.8, None)],
[(shear_y, 0.9, 8), (invert, 0.4, None)],
[(shear_y, 0.9, 5), (solarize, 0.2, 6)],
[(invert, 0.9, None), (auto_contrast, 0.8, None)],
[(equalize, 0.6, None), (rotate, 0.9, 3)],
[(shear_x, 0.9, 4), (solarize, 0.3, 3)],
[(shear_y, 0.8, 8), (invert, 0.7, None)],
[(equalize, 0.9, None), (translate_y, 0.6, 6)],
[(invert, 0.9, None), (equalize, 0.6, None)],
[(contrast, 0.3, 3), (rotate, 0.8, 4)],
[(invert, 0.8, None)],
[(shear_y, 0.7, 6), (solarize, 0.4, 8)],
[(invert, 0.6, None), (rotate, 0.8, 4)],
[(shear_y, 0.3, 7), (translate_x, 0.9, 3)],
[(shear_x, 0.1, 6), (invert, 0.6, None)],
[(solarize, 0.7, 2), (translate_y, 0.6, 7)],
[(shear_y, 0.8, 4), (invert, 0.8, None)],
[(shear_x, 0.7, 9), (translate_y, 0.8, 3)],
[(shear_y, 0.8, 5), (auto_contrast, 0.7, None)],
[(shear_x, 0.7, 2), (invert, 0.1, None)],
])
def get_reduced_image_net_policy() -> Policy:
"""
Creates augmentation policy tuned with the reduced ImageNet as described in
AutoAugment paper (https://arxiv.org/abs/1805.09501).
The returned policy can be run with
:meth:`~nvidia.dali.auto_aug.auto_augment.apply_auto_augment`.
"""
shear_x = a.shear_x.augmentation((0, 0.3), True)
rotate = a.rotate.augmentation((0, 30), True)
color = a.color.augmentation((0.1, 1.9), False, None)
contrast = a.contrast.augmentation((0.1, 1.9), False, None)
sharpness = a.sharpness.augmentation((0.1, 1.9), False, a.sharpness_kernel_shifted)
posterize = a.posterize.augmentation((0, 4), False, a.poster_mask_uint8)
solarize = a.solarize.augmentation((0, 256), False)
invert = a.invert
equalize = a.equalize
auto_contrast = a.auto_contrast
return Policy(
name="ReducedImageNetPolicy",
num_magnitude_bins=11, sub_policies=[[(posterize, 0.4, 8), (rotate, 0.6, 9)],
[(solarize, 0.6, 5), (auto_contrast, 0.6, None)],
[(equalize, 0.8, None), (equalize, 0.6, None)],
[(posterize, 0.6, 7), (posterize, 0.6, 6)],
[(equalize, 0.4, None), (solarize, 0.2, 4)],
[(equalize, 0.4, None), (rotate, 0.8, 8)],
[(solarize, 0.6, 3), (equalize, 0.6, None)],
[(posterize, 0.8, 5), (equalize, 1.0, None)],
[(rotate, 0.2, 3), (solarize, 0.6, 8)],
[(equalize, 0.6, None), (posterize, 0.4, 6)],
[(rotate, 0.8, 8), (color, 0.4, 0)],
[(rotate, 0.4, 9), (equalize, 0.6, None)],
[(equalize, 0.8, None)],
[(invert, 0.6, None), (equalize, 1.0, None)],
[(color, 0.6, 4), (contrast, 1.0, 8)],
[(rotate, 0.8, 8), (color, 1.0, 2)],
[(color, 0.8, 8), (solarize, 0.8, 7)],
[(sharpness, 0.4, 7), (invert, 0.6, None)],
[(shear_x, 0.6, 5), (equalize, 1.0, None)],
[(color, 0.4, 0), (equalize, 0.6, None)],
[(equalize, 0.4, None), (solarize, 0.2, 4)],
[(solarize, 0.6, 5), (auto_contrast, 0.6, None)],
[(invert, 0.6, None), (equalize, 1.0, None)],
[(color, 0.6, 4), (contrast, 1.0, 8)],
[(equalize, 0.8, None), (equalize, 0.6, None)]])
def _sub_policy_to_probability_map(policy: Policy) -> _DataNode:
sub_policies = policy.sub_policies
max_policy_len = max(len(sub_policy) for sub_policy in sub_policies)
prob = np.array([[0. for _ in range(max_policy_len)] for _ in range(len(sub_policies))],
dtype=np.float32)
for sub_policy_id, sub_policy in enumerate(sub_policies):
for stage_idx, (aug_name, p, mag) in enumerate(sub_policy):
prob[sub_policy_id, stage_idx] = p
return types.Constant(prob)
def _sub_policy_to_magnitude_bin_map(policy: Policy) -> _DataNode:
sub_policies = policy.sub_policies
max_policy_len = max(len(sub_policy) for sub_policy in sub_policies)
magnitude_bin = np.array([[0 for _ in range(max_policy_len)] for _ in range(len(sub_policies))],
dtype=np.int32)
for sub_policy_id, sub_policy in enumerate(sub_policies):
for stage_idx, (aug_name, p, mag) in enumerate(sub_policy):
# use dummy value instead of None, it will be ignored anyway
val = mag if mag is not None else -999
magnitude_bin[sub_policy_id, stage_idx] = val
return types.Constant(magnitude_bin)
def _sub_policy_to_augmentation_matrix_map(
policy: Policy) -> Tuple[np.ndarray, List[List[_Augmentation]]]:
"""
Creates a matrix of operators to be called for given sub policy at given stage.
The output is a tuple `(m, augments)`, where `augments` is a list of augmentations per stage
- each entry contains a reduced list of unique augmentations used in a corresponding stage.
The `m` matrix contains the mapping from the original sub_policy_id, to the index within the
reduced list, for every stage. I.e., for the sub-policy `sub_policy_idx`, as the `stage_idx`-th
operation in the sequence, the `augments[stage_idx][m[sub_policy_idx][stage_idx]]` operator
should be called.
"""
sub_policies = policy.sub_policies
max_policy_len = max(len(sub_policy) for sub_policy in sub_policies)
augmentations = [] # list of augmentations in each stage
for stage_idx in range(max_policy_len):
stage_augments = set()
stage_augments_list = []
for sub_policy in sub_policies:
if stage_idx < len(sub_policy):
aug, _, _ = sub_policy[stage_idx]
if aug not in stage_augments:
stage_augments.add(aug)
stage_augments_list.append(aug)
augmentations.append(stage_augments_list + [a.identity])
identity_id = [len(stage_augments) - 1 for stage_augments in augmentations]
augment_to_id = [{augmentation: i
for i, augmentation in enumerate(stage_augments)}
for stage_augments in augmentations]
augments_by_id = np.array([[identity_id[stage_idx] for stage_idx in range(max_policy_len)]
for _ in range(len(sub_policies))], dtype=np.int32)
for sub_policy_id, sub_policy in enumerate(sub_policies):
for stage_idx, (augment, p, mag) in enumerate(sub_policy):
augments_by_id[sub_policy_id, stage_idx] = augment_to_id[stage_idx][augment]
return augments_by_id, augmentations
def _sub_policy_to_augmentation_map(policy: Policy) -> Tuple[_DataNode, List[List[_Augmentation]]]:
matrix, augments = _sub_policy_to_augmentation_matrix_map(policy)
return types.Constant(matrix), augments
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/auto_augment.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
from nvidia.dali import fn
from nvidia.dali import types
from nvidia.dali.auto_aug import augmentations as a
from nvidia.dali.auto_aug.core import _Augmentation, signed_bin
from nvidia.dali.auto_aug.core._args import \
forbid_unused_kwargs as _forbid_unused_kwargs
from nvidia.dali.auto_aug.core._utils import \
get_translations as _get_translations, \
pretty_select as _pretty_select
from nvidia.dali.data_node import DataNode as _DataNode
def trivial_augment_wide(
data: _DataNode,
num_magnitude_bins: int = 31,
shape: Optional[Union[_DataNode, Tuple[int, int]]] = None,
fill_value: Optional[int] = 128,
interp_type: Optional[types.DALIInterpType] = None,
max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None,
seed: Optional[int] = None,
excluded: Optional[List[str]] = None,
) -> _DataNode:
"""
Applies TrivialAugment Wide (https://arxiv.org/abs/2103.10158) augmentation scheme to the
provided batch of samples.
Args
----
data : DataNode
A batch of samples to be processed. The supported samples are images
of `HWC` layout and videos of `FHWC` layout, the supported data type is `uint8`.
num_magnitude_bins: int, optional
The number of bins to divide the magnitude ranges into.
shape: DataNode or Tuple[int, int], optional
The size (height and width) of the image or frames in the video sequence
passed as the `data`. If specified, the magnitude of `translation` operations
depends on the image/frame shape and spans from 0 to `max_translate_rel * shape`.
Otherwise, the magnitude range is `[0, max_translate_abs]` for any sample.
fill_value: int, optional
A value to be used as a padding for images/frames transformed with warp_affine ops
(translation, shear and rotate). If `None` is specified, the images/frames are padded
with the border value repeated (clamped).
interp_type: types.DALIInterpType, optional
Interpolation method used by the warp_affine ops (translation, shear and rotate).
Supported values are `types.INTERP_LINEAR` (default) and `types.INTERP_NN`.
max_translate_abs: int or (int, int), optional
Only valid when ``shape`` is not provided. Specifies the maximal shift (in pixels)
in the translation augmentation. If a tuple is specified, the first component limits
height, the second the width. Defaults to 32, which means the maximal magnitude
shifts the image by 32 pixels.
max_translate_rel: float or (float, float), optional
Only valid when ``shape`` argument is provided. Specifies the maximal shift as a
fraction of image shape in the translation augmentations.
If a tuple is specified, the first component limits the height, the second the width.
Defaults to 1, which means the maximal magnitude shifts the image entirely out of
the canvas.
seed: int, optional
Seed to be used to randomly sample operations (and to negate magnitudes).
excluded: List[str], optional
A list of names of the operations to be excluded from the default suite of augmentations.
If, instead of just limiting the set of operations, you need to include some custom
operations or fine-tune the existing ones, you can use the
:meth:`~nvidia.dali.auto_aug.trivial_augment.apply_trivial_augment` directly,
which accepts a list of augmentations.
Returns
-------
DataNode
A batch of transformed samples.
"""
aug_kwargs = {"fill_value": fill_value, "interp_type": interp_type}
use_shape = shape is not None
if use_shape:
aug_kwargs["shape"] = shape
augmentations = get_trivial_augment_wide_suite(use_shape=use_shape,
max_translate_abs=max_translate_abs,
max_translate_rel=max_translate_rel)
augmentation_names = set(aug.name for aug in augmentations)
assert len(augmentation_names) == len(augmentations)
excluded = excluded or []
for name in excluded:
if name not in augmentation_names:
raise Exception(
f"The `{name}` was specified in `excluded`, but the TrivialAugmentWide suite "
f"does not contain augmentation with this name. "
f"The augmentations in the suite are: {', '.join(augmentation_names)}.")
selected_augments = [aug for aug in augmentations if aug.name not in excluded]
return apply_trivial_augment(selected_augments, data, num_magnitude_bins=num_magnitude_bins,
seed=seed, **aug_kwargs)
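# --- Editor's illustrative sketch (not part of the original file) ---
# Applying TrivialAugment Wide while excluding one operation from the suite;
# the choice of `rotate` is arbitrary.
def _example_trivial_augment(data):
    return trivial_augment_wide(data, num_magnitude_bins=31, excluded=["rotate"])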
def apply_trivial_augment(augmentations: List[_Augmentation], data: _DataNode,
num_magnitude_bins: int = 31, seed: Optional[int] = None,
**kwargs) -> _DataNode:
"""
Applies the list of `augmentations` in TrivialAugment
(https://arxiv.org/abs/2103.10158) fashion.
Each sample is processed with a randomly selected transformation from the `augmentations` list.
The magnitude bin for every transformation is randomly selected from
`[0, num_magnitude_bins - 1]`.
Args
----
augmentations : List[core._Augmentation]
List of augmentations to be sampled and applied in TrivialAugment fashion.
data : DataNode
A batch of samples to be processed.
num_magnitude_bins: int, optional
The number of bins to divide the magnitude ranges into.
seed: int, optional
Seed to be used to randomly sample operations (and to negate magnitudes).
kwargs:
Any extra parameters to be passed when calling `augmentations`.
The signature of each augmentation is checked for any extra arguments and if
the name of the argument matches one from the `kwargs`, the value is
passed as an argument. For example, some augmentations from the default
TrivialAugment suite accept ``shape``, ``fill_value`` and ``interp_type``.
Returns
-------
DataNode
A batch of transformed samples.
"""
if not isinstance(num_magnitude_bins, int) or num_magnitude_bins < 1:
raise Exception(
f"The `num_magnitude_bins` must be a positive integer, got {num_magnitude_bins}.")
if len(augmentations) == 0:
raise Exception("The `augmentations` list cannot be empty. "
"Got empty list in `apply_trivial_augment` call.")
magnitude_bin = fn.random.uniform(values=list(range(num_magnitude_bins)), dtype=types.INT32,
seed=seed)
use_signed_magnitudes = any(aug.randomly_negate for aug in augmentations)
if use_signed_magnitudes:
magnitude_bin = signed_bin(magnitude_bin, seed=seed)
_forbid_unused_kwargs(augmentations, kwargs, 'apply_trivial_augment')
op_kwargs = dict(data=data, magnitude_bin=magnitude_bin, num_magnitude_bins=num_magnitude_bins,
**kwargs)
op_idx = fn.random.uniform(values=list(range(len(augmentations))), seed=seed, dtype=types.INT32)
return _pretty_select(augmentations, op_idx, op_kwargs, auto_aug_name='apply_trivial_augment',
ref_suite_name='get_trivial_augment_wide_suite')
def get_trivial_augment_wide_suite(
use_shape: bool = False, max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None) -> List[_Augmentation]:
"""
Creates a list of 14 augmentations referred to as the wide augmentation space in the TrivialAugment paper
(https://arxiv.org/abs/2103.10158).
Args
----
use_shape : bool
If true, the translation offset is computed as a percentage of the image/frame shape.
Useful if the samples processed with the auto augment have different shapes.
If false, the offsets range is bounded by a constant (`max_translate_abs`).
max_translate_abs: int or (int, int), optional
Only valid with use_shape=False, specifies the maximal shift (in pixels) in the translation
augmentations. If a tuple is specified, the first component limits height, the second the
width. Defaults to 32.
max_translate_rel: float or (float, float), optional
Only valid with use_shape=True, specifies the maximal shift as a fraction of image/frame
shape in the translation augmentations. If a tuple is specified, the first component limits
height, the second the width. Defaults to 1.
"""
default_translate_abs, default_translate_rel = 32, 1.
# translations = [translate_x, translate_y] with adjusted magnitude range
translations = _get_translations(use_shape, default_translate_abs, default_translate_rel,
max_translate_abs, max_translate_rel)
# [.augmentation((mag_low, mag_high), randomly_negate_mag, custom_magnitude_to_param_mapping)]
return translations + [
a.shear_x.augmentation((0, 0.99), True),
a.shear_y.augmentation((0, 0.99), True),
a.rotate.augmentation((0, 135), True),
a.brightness.augmentation((0.01, 0.99), True, a.shift_enhance_range),
a.contrast.augmentation((0.01, 0.99), True, a.shift_enhance_range),
a.color.augmentation((0.01, 0.99), True, a.shift_enhance_range),
a.sharpness.augmentation((0.01, 0.99), True, a.sharpness_kernel),
a.posterize.augmentation((8, 2), False, a.poster_mask_uint8),
# solarization strength increases with decreasing magnitude (threshold)
a.solarize.augmentation((256, 0)),
a.equalize,
a.auto_contrast,
a.identity,
]
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/trivial_augment.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List, Optional, Tuple, Union
from nvidia.dali import fn
from nvidia.dali import types
from nvidia.dali.auto_aug import augmentations as a
from nvidia.dali.auto_aug.core import signed_bin, _Augmentation
from nvidia.dali.auto_aug.core._args import \
forbid_unused_kwargs as _forbid_unused_kwargs
from nvidia.dali.auto_aug.core._utils import \
get_translations as _get_translations, \
pretty_select as _pretty_select
from nvidia.dali.data_node import DataNode as _DataNode
def rand_augment(
data: _DataNode,
n: int,
m: int,
num_magnitude_bins: int = 31,
shape: Optional[Union[_DataNode, Tuple[int, int]]] = None,
fill_value: Optional[int] = 128,
interp_type: Optional[types.DALIInterpType] = None,
max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None,
seed: Optional[int] = None,
monotonic_mag: bool = True,
excluded: Optional[List[str]] = None,
) -> _DataNode:
"""
Applies RandAugment (https://arxiv.org/abs/1909.13719) augmentation scheme to the
provided batch of samples.
Args
----
data : DataNode
A batch of samples to be processed. The supported samples are images
of `HWC` layout and videos of `FHWC` layout, the supported data type is `uint8`.
n: int
The number of randomly sampled operations to be applied to a sample.
m: int
A magnitude (strength) of each operation to be applied, it must be an integer
within ``[0, num_magnitude_bins - 1]``.
num_magnitude_bins: int, optional
The number of bins to divide the magnitude ranges into.
shape: DataNode or Tuple[int, int], optional
The size (height and width) of the image or frames in the video sequence
passed as the `data`. If specified, the magnitude of `translation` operations
depends on the image/frame shape and spans from 0 to `max_translate_rel * shape`.
Otherwise, the magnitude range is `[0, max_translate_abs]` for any sample.
fill_value: int, optional
A value to be used as a padding for images/frames transformed with warp_affine ops
(translation, shear and rotate). If `None` is specified, the images/frames are padded
with the border value repeated (clamped).
interp_type: types.DALIInterpType, optional
Interpolation method used by the warp_affine ops (translation, shear and rotate).
Supported values are `types.INTERP_LINEAR` (default) and `types.INTERP_NN`.
max_translate_abs: int or (int, int), optional
Only valid when ``shape`` is not provided. Specifies the maximal shift (in pixels)
in the translation augmentation. If a tuple is specified, the first component limits
height, the second the width. Defaults to 100, which means the maximal magnitude
shifts the image by 100 pixels.
max_translate_rel: float or (float, float), optional
Only valid when ``shape`` argument is provided. Specifies the maximal shift as a
fraction of image shape in the translation augmentations.
If a tuple is specified, the first component limits the height, the second the width.
Defaults to around `0.45` (100/224).
seed: int, optional
Seed to be used to randomly sample operations (and to negate magnitudes).
monotonic_mag: bool, optional
There are two flavours of RandAugment available in different frameworks. For the default
``monotonic_mag=True``, the strength of operations that accept magnitude bins increases with
the bin index. If set to False, the magnitude ranges of some color operations differ.
There, the :meth:`~nvidia.dali.auto_aug.augmentations.posterize` and
:meth:`~nvidia.dali.auto_aug.augmentations.solarize` strength decreases with increasing
magnitude bins and enhance operations (
:meth:`~nvidia.dali.auto_aug.augmentations.brightness`,
:meth:`~nvidia.dali.auto_aug.augmentations.contrast`,
:meth:`~nvidia.dali.auto_aug.augmentations.color`,
:meth:`~nvidia.dali.auto_aug.augmentations.sharpness`) use (0.1, 1.9) range,
which means that the strength decreases the closer the magnitudes are to the center
of the range. See
:meth:`~nvidia.dali.auto_aug.rand_augment.get_rand_augment_non_monotonic_suite`.
excluded: List[str], optional
A list of names of the operations to be excluded from the default suite of augmentations.
If, instead of just limiting the set of operations, you need to include some custom
operations or fine-tune the existing ones, you can use the
:meth:`~nvidia.dali.auto_aug.rand_augment.apply_rand_augment` directly, which accepts
a list of augmentations.
Returns
-------
DataNode
A batch of transformed samples.
"""
aug_kwargs = {"fill_value": fill_value, "interp_type": interp_type}
use_shape = shape is not None
if use_shape:
aug_kwargs["shape"] = shape
if monotonic_mag:
augmentations = get_rand_augment_suite(use_shape, max_translate_abs, max_translate_rel)
else:
augmentations = get_rand_augment_non_monotonic_suite(use_shape, max_translate_abs,
max_translate_rel)
augmentation_names = set(aug.name for aug in augmentations)
assert len(augmentation_names) == len(augmentations)
excluded = excluded or []
for name in excluded:
if name not in augmentation_names:
raise Exception(f"The `{name}` was specified in `excluded`, but the RandAugment suite "
f"does not contain augmentation with this name. "
f"The augmentations in the suite are: {', '.join(augmentation_names)}.")
selected_augments = [aug for aug in augmentations if aug.name not in excluded]
return apply_rand_augment(selected_augments, data, n, m,
num_magnitude_bins=num_magnitude_bins, seed=seed, **aug_kwargs)
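# --- Editor's illustrative sketch (not part of the original file) ---
# A commonly cited RandAugment setting: two operations per sample at
# magnitude bin 9 (out of the default 31 bins).
def _example_rand_augment(data):
    return rand_augment(data, n=2, m=9)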
def apply_rand_augment(augmentations: List[_Augmentation], data: _DataNode, n: int, m: int,
num_magnitude_bins: int = 31, seed: Optional[int] = None,
**kwargs) -> _DataNode:
"""
Applies the list of ``augmentations`` in RandAugment (https://arxiv.org/abs/1909.13719) fashion.
Each sample is transformed with ``n`` operations in a sequence randomly selected from the
``augmentations`` list. Each operation uses ``m`` as the magnitude bin.
Args
----
augmentations : List[core._Augmentation]
List of augmentations to be sampled and applied in RandAugment fashion.
data : DataNode
A batch of samples to be processed.
n: int
The number of randomly sampled operations to be applied to a sample.
m: int
A magnitude bin (strength) of each operation to be applied, it must be an integer
within ``[0, num_magnitude_bins - 1]``.
num_magnitude_bins: int
The number of bins to divide the magnitude ranges into.
seed: int
Seed to be used to randomly sample operations (and to negate magnitudes).
kwargs:
Any extra parameters to be passed when calling `augmentations`.
The signature of each augmentation is checked for any extra arguments and if
the name of the argument matches one from the `kwargs`, the value is
passed as an argument. For example, some augmentations from the default
RandAugment suite accept ``shape``, ``fill_value`` and ``interp_type``.
Returns
-------
DataNode
A batch of transformed samples.
"""
if not isinstance(n, int) or n < 0:
raise Exception(
f"The number of operations to apply `n` must be a non-negative integer, got {n}.")
if not isinstance(num_magnitude_bins, int) or num_magnitude_bins < 1:
raise Exception(
f"The `num_magnitude_bins` must be a positive integer, got {num_magnitude_bins}.")
if not isinstance(m, int) or not 0 <= m < num_magnitude_bins:
raise Exception(f"The magnitude bin `m` must be an integer from "
f"`[0, {num_magnitude_bins - 1}]` range. Got {m}.")
if n == 0:
warnings.warn(
"The `apply_rand_augment` was called with `n=0`, "
"no augmentation will be applied.", Warning)
return data
if len(augmentations) == 0:
raise Exception("The `augmentations` list cannot be empty, unless n=0. "
"Got empty list in `apply_rand_augment` call.")
shape = tuple() if n == 1 else (n, )
op_idx = fn.random.uniform(values=list(range(len(augmentations))), seed=seed, shape=shape,
dtype=types.INT32)
use_signed_magnitudes = any(aug.randomly_negate for aug in augmentations)
mag_bin = signed_bin(m, seed=seed, shape=shape) if use_signed_magnitudes else m
_forbid_unused_kwargs(augmentations, kwargs, 'apply_rand_augment')
for level_idx in range(n):
level_mag_bin = mag_bin if not use_signed_magnitudes or n == 1 else mag_bin[level_idx]
op_kwargs = dict(data=data, magnitude_bin=level_mag_bin,
num_magnitude_bins=num_magnitude_bins, **kwargs)
level_op_idx = op_idx if n == 1 else op_idx[level_idx]
data = _pretty_select(augmentations, level_op_idx, op_kwargs,
auto_aug_name='apply_rand_augment',
ref_suite_name='get_rand_augment_suite')
return data
def get_rand_augment_suite(use_shape: bool = False, max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None) -> List[_Augmentation]:
"""
Creates a list of RandAugment augmentations.
Args
----
use_shape : bool
If true, the translation offset is computed as a percentage of the image/frame shape.
Useful if the samples processed with the auto augment have different shapes.
If false, the offsets range is bounded by a constant (`max_translate_abs`).
max_translate_abs: int or (int, int), optional
Only valid with use_shape=False, specifies the maximal shift (in pixels) in the translation
augmentations. If a tuple is specified, the first component limits height, the second the
width. Defaults to 100.
max_translate_rel: float or (float, float), optional
Only valid with use_shape=True, specifies the maximal shift as a fraction of image/frame
shape in the translation augmentations. If a tuple is specified, the first component limits
height, the second the width. Defaults to around `0.45` (100/224).
"""
default_translate_abs, default_translate_rel = 100, 100 / 224
# translations = [translate_x, translate_y] with adjusted magnitude range
translations = _get_translations(use_shape, default_translate_abs, default_translate_rel,
max_translate_abs, max_translate_rel)
# [.augmentation((mag_low, mag_high), randomly_negate_mag, magnitude_to_param_custom_mapping)]
return translations + [
a.shear_x.augmentation((0, 0.3), True),
a.shear_y.augmentation((0, 0.3), True),
a.rotate.augmentation((0, 30), True),
a.brightness.augmentation((0, 0.9), True, a.shift_enhance_range),
a.contrast.augmentation((0, 0.9), True, a.shift_enhance_range),
a.color.augmentation((0, 0.9), True, a.shift_enhance_range),
a.sharpness.augmentation((0, 0.9), True, a.sharpness_kernel),
a.posterize.augmentation((8, 4), False, a.poster_mask_uint8),
# solarization strength increases with decreasing magnitude (threshold)
a.solarize.augmentation((256, 0)),
a.equalize,
a.auto_contrast,
a.identity,
]
def get_rand_augment_non_monotonic_suite(
use_shape: bool = False, max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None) -> List[_Augmentation]:
"""
Similarly to :meth:`~nvidia.dali.auto_aug.rand_augment.get_rand_augment_suite` creates a list
of RandAugment augmentations.
This variant uses brightness, contrast, color, sharpness, posterize, and solarize
with magnitude ranges as used by AutoAugment. However, those ranges do not meet
the intuition that a bigger magnitude bin corresponds to a stronger operation.
"""
default_translate_abs, default_translate_rel = 100, 100 / 224
# translations = [translate_x, translate_y] with adjusted magnitude range
translations = _get_translations(use_shape, default_translate_abs, default_translate_rel,
max_translate_abs, max_translate_rel)
return translations + [
a.shear_x.augmentation((0, 0.3), True),
a.shear_y.augmentation((0, 0.3), True),
a.rotate.augmentation((0, 30), True),
a.brightness.augmentation((0.1, 1.9), False, None),
a.contrast.augmentation((0.1, 1.9), False, None),
a.color.augmentation((0.1, 1.9), False, None),
a.sharpness.augmentation((0.1, 1.9), False, a.sharpness_kernel_shifted),
a.posterize.augmentation((0, 4), False, a.poster_mask_uint8),
a.solarize.augmentation((0, 256), False, None),
a.equalize,
a.auto_contrast,
a.identity,
]
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/rand_augment.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import numpy as np
except ImportError:
raise RuntimeError(
"Could not import numpy. DALI's automatic augmentation examples depend on numpy. "
"Please install numpy to use the examples.")
from nvidia.dali import fn
from nvidia.dali import types
from nvidia.dali.auto_aug.core import augmentation
"""
This module contains a standard suite of augmentations used by AutoAugment policy for ImageNet,
RandAugment and TrivialAugmentWide. The augmentations are implemented in terms of DALI operators.
The `@augmentation` decorator handles computation of the decorated transformation's parameter.
When called, the decorated augmentation expects:
* a single positional argument: batch of samples
* `magnitude_bin` and `num_magnitude_bins` instead of the parameter.
The parameter is computed as if by calling
`mag_to_param(magnitudes[magnitude_bin] * ((-1) ** random_sign))`, where
`magnitudes=linspace(mag_range[0], mag_range[1], num_magnitude_bins)`.
The augmentations in this module are defined with example setups passed
to `@augmentation`. The parameters can be easily adjusted. For instance, to increase
the magnitudes range of `shear_x` from 0.3 to 0.5, you can create
`my_shear_x = shear_x.augmentation(mag_range=(0, 0.5))`.
"""
def warp_x_param(magnitude):
return [magnitude, 0]
def warp_y_param(magnitude):
return [0, magnitude]
@augmentation(mag_range=(0, 0.3), randomly_negate=True, mag_to_param=warp_x_param)
def shear_x(data, shear, fill_value=128, interp_type=None):
mt = fn.transforms.shear(shear=shear)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0, 0.3), randomly_negate=True, mag_to_param=warp_y_param)
def shear_y(data, shear, fill_value=128, interp_type=None):
mt = fn.transforms.shear(shear=shear)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0., 1.), randomly_negate=True, mag_to_param=warp_x_param)
def translate_x(data, rel_offset, shape, fill_value=128, interp_type=None):
offset = rel_offset * shape[1]
mt = fn.transforms.translation(offset=offset)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0, 250), randomly_negate=True, mag_to_param=warp_x_param,
name="translate_x")
def translate_x_no_shape(data, offset, fill_value=128, interp_type=None):
mt = fn.transforms.translation(offset=offset)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0., 1.), randomly_negate=True, mag_to_param=warp_y_param)
def translate_y(data, rel_offset, shape, fill_value=128, interp_type=None):
offset = rel_offset * shape[0]
mt = fn.transforms.translation(offset=offset)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0, 250), randomly_negate=True, mag_to_param=warp_y_param,
name="translate_y")
def translate_y_no_shape(data, offset, fill_value=128, interp_type=None):
mt = fn.transforms.translation(offset=offset)
return fn.warp_affine(data, matrix=mt, fill_value=fill_value, interp_type=interp_type,
inverse_map=False)
@augmentation(mag_range=(0, 30), randomly_negate=True)
def rotate(data, angle, fill_value=128, interp_type=None, rotate_keep_size=True):
return fn.rotate(data, angle=angle, fill_value=fill_value, interp_type=interp_type,
keep_size=rotate_keep_size)
def shift_enhance_range(magnitude):
"""The `enhance` operations (brightness, contrast, color, sharpness) accept magnitudes
from [0, 2] range. However, the neutral magnitude is not 0 but 1 and the intuitive strength
of the operation increases the further the magnitude is from 1. So, we specify magnitudes range
to be in [0, 1] range, expect it to be randomly negated and then shift it by 1"""
return 1 + magnitude
@augmentation(mag_range=(0, 0.9), randomly_negate=True, mag_to_param=shift_enhance_range)
def brightness(data, parameter):
return fn.brightness(data, brightness=parameter)
@augmentation(mag_range=(0, 0.9), randomly_negate=True, mag_to_param=shift_enhance_range)
def contrast(data, parameter):
"""
It follows PIL implementation of Contrast enhancement which uses a channel-weighted
mean as a contrast center.
"""
# assumes FHWC or HWC layout
mean = fn.reductions.mean(data, axis_names="HW", keep_dims=True)
rgb_weights = types.Constant(np.array([0.299, 0.587, 0.114], dtype=np.float32))
center = fn.reductions.sum(mean * rgb_weights, axis_names="C", keep_dims=True)
# it could be just `fn.contrast(data, contrast=parameter, contrast_center=center)`
# but for GPU `data` the `center` is in GPU mem, and that cannot be passed
# as named arg (i.e. `contrast_center`) to the operator
return fn.cast_like(center + (data - center) * parameter, data)
@augmentation(mag_range=(0, 0.9), randomly_negate=True, mag_to_param=shift_enhance_range)
def color(data, parameter):
return fn.saturation(data, saturation=parameter)
def sharpness_kernel(magnitude):
# assumes magnitude: [-1, 1]
blur = np.array([[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=np.float32) / 13
ident = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.float32)
return -magnitude * blur + (1 + magnitude) * ident
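# Editor's note: worked values for the mapping above -- magnitude -1 yields the
# smoothing kernel `blur`, 0 yields the identity kernel, and +1 yields the
# sharpening kernel `2 * ident - blur`.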
def sharpness_kernel_shifted(magnitude):
# assumes magnitude: [0, 2]
return sharpness_kernel(magnitude - 1)
@augmentation(mag_range=(0, 0.9), randomly_negate=True, mag_to_param=sharpness_kernel,
param_device="auto")
def sharpness(data, kernel):
"""
The outputs correspond to PIL's ImageEnhance.Sharpness, with the exception of a 1px
border around the output. PIL computes the convolution with the smoothing filter only
for valid positions (no out-of-bounds filter positions) and pads the output with the input.
"""
return fn.experimental.filter(data, kernel)
def poster_mask_uint8(magnitude):
# expects [0..8], where 8 yields the identity mask and 0
# would be a mask that zeros all bits;
# however, following the implementations for AA and RA referred to
# in the paper https://arxiv.org/pdf/1909.13719.pdf, we remap 0 to 1
# to avoid completely blank images
magnitude = np.round(magnitude).astype(np.uint32)
if magnitude <= 0:
magnitude = 1
elif magnitude > 8:
magnitude = 8
nbits = np.round(8 - magnitude).astype(np.uint32)
removal_mask = np.uint8(2)**nbits - 1
return np.array(np.uint8(255) ^ removal_mask, dtype=np.uint8)
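# Editor's note: a worked example of the mapping above -- for magnitude 4,
# nbits = 4, removal_mask = 0b00001111, so the returned mask is 0b11110000
# and posterize keeps the top four bits of each channel value.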
@augmentation(mag_range=(0, 4), mag_to_param=poster_mask_uint8, param_device="auto")
def posterize(data, mask):
return data & mask
@augmentation(mag_range=(256, 0), param_device="auto")
def solarize(data, threshold):
sample_inv = types.Constant(255, dtype=types.UINT8) - data
mask_unchanged = data < threshold
mask_inverted = mask_unchanged ^ True
return mask_unchanged * data + mask_inverted * sample_inv
def solarize_add_shift(shift):
if shift >= 128:
raise Exception("The solarize_add augmentation accepts shifts from 0 to 128")
return np.uint8(shift)
@augmentation(mag_range=(0, 110), param_device="auto", mag_to_param=solarize_add_shift)
def solarize_add(data, shift):
mask_shifted = data < types.Constant(128, dtype=types.UINT8)
mask_id = mask_shifted ^ True
sample_shifted = data + shift
return mask_shifted * sample_shifted + mask_id * data
@augmentation
def invert(data, _):
return types.Constant(255, dtype=types.UINT8) - data
@augmentation
def equalize(data, _):
"""
DALI's equalize follows OpenCV's histogram equalization.
The PIL uses slightly different formula when transforming histogram's
cumulative sum into lookup table.
"""
return fn.experimental.equalize(data)
@augmentation
def auto_contrast(data, _):
# assumes FHWC or HWC layout
lo = fn.reductions.min(data, axis_names="HW", keep_dims=True)
hi = fn.reductions.max(data, axis_names="HW", keep_dims=True)
diff = hi - lo
mask_scale = diff > 0
mask_id = mask_scale ^ True
# choose div so that scale ends up being 255 / diff if diff > 0 and 1 otherwise
div_by = diff * mask_scale + types.Constant(255, dtype=types.UINT8) * mask_id
scale = 255 / div_by
scaled = (data - lo * mask_scale) * scale
return fn.cast_like(scaled, data)
@augmentation
def identity(data, _):
return data
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/augmentations.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from nvidia.dali.data_node import DataNode as _DataNode
from nvidia.dali.auto_aug.core._augmentation import Augmentation
def split_samples_among_ops(op_range_lo: int, op_range_hi: int, ops: List[Augmentation],
selected_op_idx: _DataNode, op_args, op_kwargs):
assert op_range_lo <= op_range_hi
if op_range_lo == op_range_hi:
return ops[op_range_lo](*op_args, **op_kwargs)
mid = (op_range_lo + op_range_hi) // 2
if selected_op_idx <= mid:
return split_samples_among_ops(op_range_lo, mid, ops, selected_op_idx, op_args, op_kwargs)
else:
return split_samples_among_ops(mid + 1, op_range_hi, ops, selected_op_idx, op_args,
op_kwargs)
def select(ops: List[Augmentation], selected_op_idx: _DataNode, *op_args, **op_kwargs):
"""
Applies the operator from the operators list based on the provided index as if by calling
`ops[selected_op_idx](**op_kwargs)`.
The `selected_op_idx` must be a batch of indices from [0, len(ops) - 1] range. The `op_kwargs`
can contain other data nodes, they will be split into partial batches accordingly.
"""
return split_samples_among_ops(0, len(ops) - 1, ops, selected_op_idx, op_args, op_kwargs)
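# --- Editor's illustrative sketch (not part of the original file) ---
# Routing every sample to a randomly chosen augmentation; `augmentations` is
# assumed to be a non-empty list of Augmentation instances and `data` a DataNode,
# with any required `magnitude_bin`/`num_magnitude_bins` passed via `op_kwargs`.
def _example_select(data, augmentations, **op_kwargs):
    from nvidia.dali import fn, types
    op_idx = fn.random.uniform(values=list(range(len(augmentations))),
                               dtype=types.INT32)
    return select(augmentations, op_idx, data, **op_kwargs)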
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/core/_select.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Type, Tuple, Optional, Union
from nvidia.dali.data_node import DataNode as _DataNode
from nvidia.dali.auto_aug.core._augmentation import Augmentation
try:
import numpy as np
except ImportError:
raise RuntimeError(
"Could not import numpy. DALI's automatic augmentation examples depend on numpy. "
"Please install numpy to use the examples.")
from numpy import typing as npt
def augmentation(function: Optional[Callable[..., _DataNode]] = None, *,
mag_range: Optional[Union[Tuple[float, float], np.ndarray]] = None,
randomly_negate: Optional[bool] = None,
mag_to_param: Optional[Callable[[float], npt.ArrayLike]] = None,
param_device: Optional[str] = None, name: Optional[str] = None,
augmentation_cls: Optional[Type[Augmentation]] = None):
"""
A decorator turning transformations implemented with DALI into augmentations that
can be used by automatic augmentations (e.g. AutoAugment, RandAugment, TrivialAugment).
The `function` must accept at least two args: a sample and a parameter.
    The decorator handles computation of the parameter. Instead of the parameter, the
    decorated augmentation accepts a magnitude bin and the total number of bins.
Then, the bin is used to compute the parameter as if by calling
`mag_to_param(magnitudes[magnitude_bin] * ((-1) ** random_sign))`, where
`magnitudes=linspace(mag_range[0], mag_range[1], num_magnitude_bins)`.
Args
----
function : callable
A function that accepts at least two positional args: a batch
(represented as DataNode) to be processed, and a parameter of the transformation.
The function must use DALI operators to process the batch and return a single output
of such processing.
mag_range : (number, number) or np.ndarray
Specifies the range of applicable magnitudes for the operation.
If the tuple is provided, the magnitudes will be computed as
`linspace(mag_range[0], mag_range[1], num_magnitude_bins)`.
If the np.ndarray is provided, it will be used directly instead of the linspace.
If no `mag_range` is specified, the parameter passed to the `function` will be `None`.
randomly_negate: bool
If `True`, the magnitude from the mag_range will be randomly negated for every sample.
mag_to_param: callable
A callback that transforms the magnitude into a parameter. The parameter will be passed to
the decorated operation instead of the plain magnitude. This way, the parameters for the
range of magnitudes can be computed once in advance and stored as a Constant node.
Note, the callback must return numpy arrays or data directly convertible to numpy arrays
(in particular, no pipelined DALI operators can be used in the callback).
The output type and dimensionality must be consistent and not depend on the magnitude.
param_device: str
A "cpu", "gpu", or "auto"; defaults to "cpu". Describes where to store the precomputed
parameters (i.e. the `mag_to_param` outputs). If "auto" is specified, the CPU or GPU
backend will be selected to match the `sample`'s backend.
name: str
Name of the augmentation. By default, the name of the decorated function is used.
Returns
-------
Augmentation
The operation wrapped with the Augmentation class so that it can be used with the `auto_aug`
transforms.
"""
def decorator(function):
cls = augmentation_cls or Augmentation
return cls(function, mag_range=mag_range, mag_to_param=mag_to_param,
randomly_negate=randomly_negate, param_device=param_device, name=name)
if function is None:
return decorator
else:
if not callable(function):
raise Exception(f"The `@augmentation` decorator was used to decorate the object that "
f"is not callable: {function}.")
elif isinstance(function, Augmentation):
# it's not clear if we should go with "update the setup" or
# "discard and create" semantics here
raise Exception(
f"The `@augmentation` was applied to already decorated Augmentation. "
f"Please call `{function.name}.augmentation` method to modify the augmentation "
f"setup or apply the decorator to the underlying `{function.name}.op` directly.\n"
f"Error in augmentation: {function}.")
return decorator(function)
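# Editor's example (a minimal sketch): defining a custom augmentation with the
# decorator above. `fn.rotate` is a standard DALI operator; the magnitude is
# drawn from `mag_range`, optionally negated, and passed as the `angle` parameter.
if __name__ == "__main__":
    from nvidia.dali import fn

    @augmentation(mag_range=(0, 30), randomly_negate=True,
                  mag_to_param=lambda angle: np.float32(angle))
    def rotate_sketch(data, angle):
        return fn.rotate(data, angle=angle, fill_value=0)

    # Inside a pipeline, this would be invoked as e.g.:
    #   rotate_sketch(images, magnitude_bin=5, num_magnitude_bins=31)
    print(rotate_sketch)  # repr of the resulting Augmentation instance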
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/core/decorator.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
class MissingArgException(Exception):
def __init__(self, message, augmentation, missing_args):
super().__init__(message)
self.augmentation = augmentation
self.missing_args = missing_args
class UnusedArgException(Exception):
def __init__(self, message, unused_args):
super().__init__(message)
self.unused_args = unused_args
def filter_extra_accepted_kwargs(fun, kwargs, skip_positional=0):
"""
Returns sub-dict of `kwargs` with the keys that match the
names of arguments in `fun`'s signature.
"""
sig = inspect.signature(fun)
    # the params from the signature with up to skip_positional of them filtered out
    # (fewer only if there are not enough positional args)
params = [(name, param) for i, (name, param) in enumerate(sig.parameters.items())
if i >= skip_positional or param.kind not in
[inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]]
extra = [
name for (name, param) in params
if param.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY]
]
return {name: value for name, value in kwargs.items() if name in extra}
def get_required_kwargs(fun, skip_positional=0):
"""
Returns the list of names of args/kwargs without defaults from
`fun` signature.
"""
sig = inspect.signature(fun)
    # the params from the signature with up to skip_positional of them filtered out
    # (fewer only if there are not enough positional args)
params = [(name, param) for i, (name, param) in enumerate(sig.parameters.items())
if i >= skip_positional or param.kind not in
[inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]]
return [
name for name, param in params if param.default is inspect.Parameter.empty
and param.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY]
]
def get_num_positional_args(fun):
"""
Returns the number of arguments that can be passed positionally to the `fun` call.
"""
sig = inspect.signature(fun)
return len([
name for name, param in sig.parameters.items() if param.kind in
[inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]
])
def get_missing_kwargs(fun, kwargs, skip_positional=0):
required = get_required_kwargs(fun, skip_positional=skip_positional)
return [name for name in required if name not in kwargs]
def filter_unused_args(augmentations, kwargs):
used_kwargs = set(kwarg_name for augment in augmentations
for kwarg_name in filter_extra_accepted_kwargs(augment.op, kwargs, 2))
return [kwarg_name for kwarg_name in kwargs if kwarg_name not in used_kwargs]
def forbid_unused_kwargs(augmentations, kwargs, call_name):
unused_args = filter_unused_args(augmentations, kwargs)
if unused_args:
subject, verb = ("kwarg", "is") if len(unused_args) == 1 else ("kwargs", "are")
unused_kwargs_str = ", ".join(unused_args)
raise UnusedArgException(
f"The {call_name} got unexpected {subject}. "
f"The {subject} `{unused_kwargs_str}` {verb} not used by any of the augmentations.",
unused_args=unused_args)
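# Editor's example (a minimal sketch): how the signature helpers above behave
# for a toy augmentation function with two mandatory positional args.
if __name__ == "__main__":
    def toy_op(data, param, shape=None, fill_value=128):
        return data

    # skip the two mandatory positional args (data, param)
    print(filter_extra_accepted_kwargs(toy_op, {"shape": (224, 224), "seed": 42}, 2))
    # -> {'shape': (224, 224)}
    print(get_required_kwargs(toy_op, skip_positional=2))
    # -> [] (the remaining args have defaults)
    print(get_num_positional_args(toy_op))
    # -> 4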
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/core/_args.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from nvidia.dali.auto_aug.core._augmentation import Augmentation
from typing import Optional, Sequence, Tuple
class Policy:
def __init__(self, name: str, num_magnitude_bins: int,
sub_policies: Sequence[Sequence[Tuple[Augmentation, float, Optional[int]]]]):
"""
Describes the augmentation policy as introduced in AutoAugment
(https://arxiv.org/abs/1805.09501).
Args
----
name : str
A name of the policy, for presentation purposes.
num_magnitude_bins : int
The number of bins that augmentations' magnitude ranges should be divided into.
sub_policies: Sequence[Sequence[Tuple[Augmentation, float, Optional[int]]]]
A list of sequences of transformations. For each processed sample, one of the
sequences is chosen uniformly at random. Then, the tuples from the sequence
            are considered one by one. Each tuple describes which augmentation to apply at
            that point, the probability of applying the augmentation at that step,
            and which magnitude to use with the augmentation.
"""
self.name = name
self.num_magnitude_bins = num_magnitude_bins
if not isinstance(num_magnitude_bins, int) or num_magnitude_bins < 1:
raise Exception(
f"The `num_magnitude_bins` must be a positive integer, got {num_magnitude_bins}.")
if not isinstance(sub_policies, (list, tuple)):
raise Exception(f"The `sub_policies` must be a list or tuple of sub policies, "
f"got {type(sub_policies)}.")
for sub_policy in sub_policies:
if not isinstance(sub_policy, (list, tuple)):
raise Exception(f"Each sub policy must be a list or tuple, got {sub_policy}.")
for op_desc in sub_policy:
if not isinstance(op_desc, (list, tuple)) or len(op_desc) != 3:
raise Exception(f"Each operation in sub policy must be specified as a triple: "
f"(augmentation, probability, magnitude). Got {op_desc}.")
aug, p, mag = op_desc
if not isinstance(aug, Augmentation):
raise Exception(
f"Each augmentation in sub policies must be an instance of "
f"Augmentation. Got `{aug}`. Did you forget to use `@augmentation` "
f"decorator?")
if not isinstance(p, (float, int)) or not 0 <= p <= 1:
raise Exception(
f"Probability of applying the augmentation must be a number from "
f"`[0, 1]` range. Got `{p}` for augmentation `{aug.name}`.")
if p == 0:
warnings.warn(f"The augmentation `{aug.name}` in policy `{name}` is used with "
f"probability 0 in one of the sub-policies.")
if mag is None:
if aug.mag_range is not None:
raise Exception(
f"The augmentation `{aug.name}` has `mag_range` specified, so the "
f"magnitude bin is required. However, got `None` in the policy "
f"`{name}`.")
else:
if aug.mag_range is None:
warnings.warn(
f"The magnitude bin `{mag}` for augmentation `{aug.name}` in policy "
f"`{name}` will be ignored. The augmentation does not accept "
f"magnitudes (as it has no `mag_range` specified). You can specify "
f"`None` instead of `{mag}` to silence this warning.")
if not isinstance(mag, int) or not 0 <= mag < self.num_magnitude_bins:
raise Exception(f"Magnitude of the augmentation must be an integer from "
f"`[0, {num_magnitude_bins - 1}]` range. "
f"Got `{mag}` for augmentation `{aug.name}`.")
self.sub_policies = _sub_policy_with_unique_names(sub_policies)
@property
def augmentations(self):
augments = set(aug for sub_policy in self.sub_policies for aug, p, mag in sub_policy)
augments = sorted(list(augments), key=lambda aug: aug.name)
return {augment.name: augment for augment in augments}
def __repr__(self):
sub_policies_repr = ",\n\t".join(
repr([(augment.name, p, mag) for augment, p, mag in sub_policy])
for sub_policy in self.sub_policies)
sub_policies_repr_sep = "" if not sub_policies_repr else "\n\t"
augmentations_repr = ",\n\t".join(f"'{name}': {repr(augment)}"
for name, augment in self.augmentations.items())
augmentations_repr_sep = "" if not augmentations_repr else "\n\t"
return (
f"Policy(name={repr(self.name)}, num_magnitude_bins={repr(self.num_magnitude_bins)}, "
f"sub_policies=[{sub_policies_repr_sep}{sub_policies_repr}], "
f"augmentations={{{augmentations_repr_sep}{augmentations_repr}}})")
def _sub_policy_with_unique_names(
sub_policies: Sequence[Sequence[Tuple[Augmentation, float, Optional[int]]]]
) -> Sequence[Sequence[Tuple[Augmentation, float, Optional[int]]]]:
"""
    Check if the augmentations used in the sub-policies have unique names.
    If not, rename them by prefixing an enumeration index to the names,
    so that the presentation is unambiguous.
"""
all_augments = [aug for sub_policy in sub_policies for aug, p, mag in sub_policy]
augments = set(all_augments)
names = set(aug.name for aug in augments)
if len(names) == len(augments):
return tuple(tuple(sub_policy) for sub_policy in sub_policies)
num_digits = len(str(len(augments) - 1))
remap_aug = {}
i = 0
for augment in all_augments:
if augment not in remap_aug:
remap_aug[augment] = augment.augmentation(
name=f"{str(i).zfill(num_digits)}__{augment.name}")
i += 1
return tuple(
tuple((remap_aug[aug], p, mag) for aug, p, mag in sub_policy)
for sub_policy in sub_policies)
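# Editor's example (a minimal sketch): building a small AutoAugment-style policy.
# It assumes `shear_x`, `equalize` and `invert` are available in
# nvidia.dali.auto_aug.augmentations (`equalize` and `invert` are shown earlier
# in this document).
if __name__ == "__main__":
    from nvidia.dali.auto_aug import augmentations as a

    shear_x = a.shear_x.augmentation(mag_range=(0, 0.3), randomly_negate=True)
    policy = Policy(
        name="sketch_policy", num_magnitude_bins=11,
        sub_policies=[
            [(shear_x, 0.8, 7), (a.equalize, 0.9, None)],
            [(a.invert, 0.4, None), (shear_x, 0.6, 3)],
        ])
    print(policy)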
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/core/policy.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.auto_aug.core._augmentation import signed_bin, Augmentation as _Augmentation
from nvidia.dali.auto_aug.core.decorator import augmentation
from nvidia.dali.auto_aug.core._select import select
from nvidia.dali.auto_aug.core.policy import Policy
__all__ = ("signed_bin", "augmentation", "select", "Policy", "_Augmentation")
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/core/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Callable, Tuple, Optional, Union
from nvidia.dali import fn, types
from nvidia.dali.data_node import DataNode as _DataNode
from nvidia.dali.auto_aug.core._args import filter_extra_accepted_kwargs, \
get_missing_kwargs, get_num_positional_args, MissingArgException
try:
import numpy as np
except ImportError:
raise RuntimeError(
"Could not import numpy. DALI's automatic augmentation examples depend on numpy. "
"Please install numpy to use the examples.")
from numpy import typing as npt
class _UndefinedParam:
"""Use _UndefinedParam as a kwarg default when it matters to distinguish between `kwarg=None`
and not specifying the kwarg"""
class _SignedMagnitudeBin:
def __init__(self, magnitude_bin: Union[int, _DataNode], random_sign: _DataNode,
signed_magnitude_idx: _DataNode):
self._magnitude_bin = magnitude_bin
self._random_sign = random_sign
self._signed_magnitude_idx = signed_magnitude_idx
def __getitem__(self, idx: int):
"""
Indexing simplifies creation of "signed magnitude bins" in cases when a single sample
may be processed by a sequence of random augmentations - we can sample random signs once
for the full sequence and then use indexing to access each single signed magnitude bin.
"""
if isinstance(self._magnitude_bin, int):
magnitude_bin = self._magnitude_bin
else:
magnitude_bin = self._magnitude_bin[idx]
cls = self.__class__
return cls(magnitude_bin, self._random_sign[idx], self._signed_magnitude_idx[idx])
@classmethod
def create_from_bin(cls, magnitude_bin: Union[int, _DataNode],
random_sign: Optional[_DataNode] = None, seed: Optional[int] = None,
shape: Optional[Tuple] = None):
if not isinstance(magnitude_bin, (int, _DataNode)):
raise Exception(f"The `magnitude_bin` must be an int or _DataNode (output of DALI op "
f"or `types.Constant`) representing batch of ints from "
f"`[0..num_magnitude_bins-1]` range. Got {magnitude_bin} instead.")
if random_sign is not None and any(arg is not None for arg in (seed, shape)):
raise Exception(
"The `random_sign` cannot be specified together with neither `seed` nor `shape`.")
if random_sign is None:
random_sign = fn.random.uniform(values=[0, 1], dtype=types.INT32, seed=seed,
shape=shape)
        # it is important to compute this as soon as possible - the bin may be created at the
        # top level of the pipeline, while it may be read inside a conditional split
signed_magnitude_idx = 2 * magnitude_bin + random_sign
return cls(magnitude_bin, random_sign, signed_magnitude_idx)
@staticmethod
def _remap_to_signed_magnitudes(magnitudes):
def remap_bin_idx(bin_idx):
magnitude = magnitudes[bin_idx // 2]
if bin_idx % 2:
magnitude = -magnitude
return magnitude
return np.array([remap_bin_idx(bin_idx) for bin_idx in range(2 * len(magnitudes))])
@property
def bin(self):
return self._magnitude_bin
@property
def random_sign(self):
return self._random_sign
@property
def signed_magnitude_idx(self):
return self._signed_magnitude_idx
def signed_bin(magnitude_bin: Union[int, _DataNode], random_sign: Optional[_DataNode] = None,
seed: Optional[int] = None, shape: Optional[Tuple] = None) -> _SignedMagnitudeBin:
"""
Combines the `magnitude_bin` with information about the sign of the magnitude.
    The Augmentation wrapper can generate and handle the random sign on its own. Yet,
    if the augmentation is called inside a conditional split, it is better to combine
    the magnitude bins and signs in advance, before the split, so that the sign handling is done
    once for the whole batch rather than multiple times for each op operating on the split batch.
Args
----
magnitude_bin: int or DataNode
The magnitude bin from range `[0, num_magnitude_bins - 1]`. Can be plain int or
a batch (_DataNode) of ints.
    random_sign : DataNode, optional
        A batch of {0, 1} integers. For augmentations declared with `randomly_negate=True`,
        it determines if the magnitude is negated (for 1) or not (for 0).
        If `random_sign` is not provided, the signs are sampled with `fn.random.uniform`,
        forwarding the optional `seed` and `shape` arguments.
    """
return _SignedMagnitudeBin.create_from_bin(magnitude_bin, random_sign, seed, shape)
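# Editor's note (illustrative): the signed bin packs the magnitude bin and the
# random sign into a single index: signed_idx = 2 * bin + sign. For magnitudes
# [m0, m1, m2], `_remap_to_signed_magnitudes` builds the lookup table
# [m0, -m0, m1, -m1, m2, -m2], so `signed_idx` selects magnitudes[bin] when
# sign == 0 and -magnitudes[bin] when sign == 1.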
class Augmentation:
"""
Wrapper for transformations implemented with DALI that are meant to be used with
automatic augmentations. You should not need to instantiate this class directly,
use `@augmentation` decorator instead.
"""
def __init__(
self,
op: Callable[..., _DataNode],
mag_range: Optional[Union[Tuple[float, float], np.ndarray]] = None,
randomly_negate: Optional[bool] = None,
mag_to_param: Optional[Callable[[float], npt.ArrayLike]] = None,
param_device: Optional[str] = None,
name: Optional[str] = None,
):
self._op = op
self._mag_range = mag_range
self._randomly_negate = randomly_negate
self._mag_to_param = mag_to_param
self._param_device = param_device
self._name = name
self._validate_op_sig()
def __repr__(self):
params = [
f"{name}={repr(val)}" for name, val in self._get_config().items() if val is not None
]
return f"Augmentation({', '.join([repr(self.op)] + params)})"
def __call__(self, data: _DataNode, *,
magnitude_bin: Optional[Union[int, _DataNode, _SignedMagnitudeBin]] = None,
num_magnitude_bins: Optional[int] = None, **kwargs) -> _DataNode:
"""
Applies the decorated transformation to the `data` as if by calling
`self.op(data, param, **kwargs)` where
`param = mag_to_param(magnitudes[magnitude_bin] * ((-1) ** random_sign))`.
Args
----
data : DataNode
A batch of samples to be transformed.
magnitude_bin: int, DataNode, or _SignedMagnitudeBin
The magnitude bin from range `[0, num_magnitude_bins - 1]`. The bin is used to get
parameter for the transformation. The parameter is computed as if by calling
`mag_to_param(magnitudes[magnitude_bin] * ((-1) ** random_sign))`, where
`magnitudes=linspace(mag_range[0], mag_range[1], num_magnitude_bins)`.
If the `mag_range` is custom `np.ndarray`, it will be used as `magnitudes` directly.
num_magnitude_bins: int
The total number of magnitude bins (limits the accepted range of
`magnitude_bin` to `[0, num_magnitude_bins - 1]`).
kwargs
Dictionary with extra arguments to pass to the `self.op`. The op's signature
is checked for any additional arguments (apart from the ``data`` and ``parameter``) and
the arguments with matching names are passed to the call.
Returns
-------
DataNode
A batch of transformed samples.
"""
num_mandatory_positional_args = 2
param_device = self._infer_param_device(data)
params = self._get_param(magnitude_bin, num_magnitude_bins, param_device)
op_kwargs = filter_extra_accepted_kwargs(self.op, kwargs, num_mandatory_positional_args)
missing_args = get_missing_kwargs(self.op, kwargs, num_mandatory_positional_args)
if missing_args:
raise MissingArgException(
f"The augmentation `{self.name}` requires following named argument(s) "
f"which were not provided to the call: {', '.join(missing_args)}. "
f"Please make sure to pass the required arguments when calling the "
f"augmentation.", augmentation=self, missing_args=missing_args)
return self.op(data, params, **op_kwargs)
@property
def op(self):
return self._op
@property
def mag_range(self):
return self._mag_range
@property
def mag_to_param(self):
return self._mag_to_param or _np_wrap
@property
def randomly_negate(self):
return self._randomly_negate or False
@property
def param_device(self):
return self._param_device or "cpu"
@property
def name(self):
return self._name or self.op.__name__
def augmentation(self, mag_range=_UndefinedParam, randomly_negate=_UndefinedParam,
mag_to_param=_UndefinedParam, param_device=_UndefinedParam,
name=_UndefinedParam, augmentation_cls=None):
"""
        A method to update the augmentation parameters specified with the `@augmentation`
        decorator. Returns a new augmentation wrapping the original operation, but with updated
        parameters. Parameters that are not specified keep their values from the initial
        augmentation.
"""
cls = augmentation_cls or self.__class__
config = self._get_config()
for key, value in dict(mag_range=mag_range, randomly_negate=randomly_negate,
mag_to_param=mag_to_param, param_device=param_device,
name=name).items():
assert key in config
if value is not _UndefinedParam:
config[key] = value
return cls(self.op, **config)
def _get_config(self):
return {
"mag_range": self._mag_range,
"mag_to_param": self._mag_to_param,
"randomly_negate": self._randomly_negate,
"param_device": self._param_device,
"name": self._name,
}
def _infer_param_device(self, sample: _DataNode):
if self.param_device != "auto":
return self.param_device
return sample.device or "cpu"
def _has_custom_magnitudes(self):
return isinstance(self.mag_range, np.ndarray)
def _map_mag_to_param(self, magnitude):
param = self.mag_to_param(magnitude)
if _contains_data_node(param):
raise Exception(
f"The `mag_to_param` callback of `{self.name}` augmentation returned `DataNode`, "
f"i.e. an output of DALI pipelined operator, which is not supported there. "
f"Instead, the `mag_to_param` callback must return parameter that is `np.ndarray` "
f"or is directly convertible to `np.ndarray`, so that the all parameters can "
f"be precomputed and reused across iterations.\n\n"
f"You can move DALI operators from `mag_to_param` callback to the "
f"decorated augmentation code or replace the DALI operators in `mag_to_param` "
f"callback with their `dali.experimental.eger` counterparts.\n\n"
f"Error in augmentation: {self}.")
return np.array(param)
def _map_mags_to_params(self, magnitudes):
params = [self._map_mag_to_param(magnitude) for magnitude in magnitudes]
if len(params) >= 2:
ref_shape = params[0].shape
ref_dtype = params[0].dtype
for param, mag in zip(params, magnitudes):
if param.shape != ref_shape or param.dtype != ref_dtype:
raise Exception(
f"The `mag_to_param` callback of `{self.name}` augmentation must return "
f"the arrays of the same type and shape for different magnitudes. "
f"Got param of shape {ref_shape} and {ref_dtype} type for magnitude "
f"{magnitudes[0]}, but for magnitude {mag} the returned array "
f"has shape {param.shape} and type {param.dtype}.\n\n"
f"Error in augmentation: {self}.")
return np.array(params)
def _get_magnitudes(self, num_magnitude_bins):
mag_range = self.mag_range
if mag_range is None:
return None
if self._has_custom_magnitudes():
if num_magnitude_bins is not None and len(mag_range) != num_magnitude_bins:
raise Exception(
f"The augmentation `{self.name}` has nd.array of length {len(mag_range)} "
f"specified as the `mag_range`. However, the `num_magnitude_bins` "
f"passed to the call is {num_magnitude_bins}.")
return mag_range
if num_magnitude_bins is None:
raise Exception(
f"The `num_magnitude_bins` argument is missing in the call of "
f"the `{self.name}` augmentation. Please specify the `num_magnitude_bins` "
f"along with the samples and magnitude_bin."
f"\nError in augmentation: {self}.")
if not hasattr(mag_range, '__len__') or len(mag_range) != 2:
raise Exception(
f"The `mag_range` must be a tuple of (low, high) ends of magnitude range or "
f"nd.array of explicitly defined magnitudes. Got `{self.mag_range}` for "
f"augmentation `{self.name}`.")
lo, hi = mag_range
return np.linspace(lo, hi, num_magnitude_bins, dtype=np.float32)
def _get_param(self, magnitude_bin, num_magnitude_bins, param_device):
magnitudes = self._get_magnitudes(num_magnitude_bins)
if magnitudes is None:
return None
if magnitude_bin is None:
raise Exception(
f"The augmentation `{self.name}` has `mag_range` specified, so when called, "
f"it requires `magnitude_bin` parameter to select the magnitude from the "
f"`mag_range`.\nError in augmentation: {self}.")
if self.randomly_negate and not isinstance(magnitude_bin, _SignedMagnitudeBin):
magnitude_bin = signed_bin(magnitude_bin)
warnings.warn(
f"The augmentation `{self.name}` was declared with `random_negate=True`, "
f"but unsigned `magnitude_bin` was passed to the augmentation call. "
f"The augmentation will randomly negate the magnitudes manually. "
f"However, for better performance, if you conditionally split batch "
f"between multiple augmentations, it is better to call "
f"`signed_magnitude_bin = signed_bin(magnitude_bin)` before the split "
f"and pass the signed bins instead.", Warning)
if self.randomly_negate:
assert isinstance(magnitude_bin, _SignedMagnitudeBin) # by the two checks above
if isinstance(magnitude_bin.bin, int):
magnitudes = [magnitudes[magnitude_bin.bin]]
param_idx = magnitude_bin.random_sign
else:
param_idx = magnitude_bin.signed_magnitude_idx
magnitudes = _SignedMagnitudeBin._remap_to_signed_magnitudes(magnitudes)
params = self._map_mags_to_params(magnitudes)
params = types.Constant(params, device=param_device)
return params[param_idx]
else:
            # other augmentations in the suite may need the sign and we got it along with the
            # magnitude bin; just unpack the plain magnitude bin
bin_idx = magnitude_bin.bin if isinstance(magnitude_bin,
_SignedMagnitudeBin) else magnitude_bin
if isinstance(bin_idx, int):
magnitude = magnitudes[bin_idx]
param = self._map_mag_to_param(magnitude)
return types.Constant(param, device=param_device)
else:
params = self._map_mags_to_params(magnitudes)
params = types.Constant(params, device=param_device)
return params[bin_idx]
def _validate_op_sig(self):
num_positional = get_num_positional_args(self.op)
if num_positional <= 1:
raise Exception(
f"The {self.op} accepts {num_positional} positional argument(s), "
f"but the functions decorated with `@augmentation` must accept at least two "
f"positional arguments: the samples and parameters.\nError in: {self}.")
def _np_wrap(mag):
return np.array(mag)
def _contains_data_node(obj):
if isinstance(obj, (tuple, list)):
return any(_contains_data_node(el) for el in obj)
return isinstance(obj, _DataNode)
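# Editor's example (a minimal sketch): calling an Augmentation with per-sample
# magnitude bins. It assumes `a.rotate` is the rotate augmentation from
# nvidia.dali.auto_aug.augmentations (declared with `randomly_negate=True`);
# pre-combining the bins with `signed_bin` samples the random signs once.
if __name__ == "__main__":
    from nvidia.dali import pipeline_def, fn, types
    from nvidia.dali.auto_aug import augmentations as a

    @pipeline_def(batch_size=4, num_threads=2, device_id=0)
    def sketch_pipeline(num_bins=31):
        data = fn.cast(fn.random.uniform(range=[0, 255], shape=(16, 16, 3)),
                       dtype=types.UINT8)
        bins = fn.random.uniform(values=list(range(num_bins)), dtype=types.INT32)
        sbin = signed_bin(bins)
        return a.rotate(data, magnitude_bin=sbin, num_magnitude_bins=num_bins)

    pipe = sketch_pipeline()
    pipe.build()
    out, = pipe.run()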
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/core/_augmentation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from nvidia.dali.data_node import DataNode as _DataNode
from nvidia.dali.auto_aug.core._select import select
from nvidia.dali.auto_aug.core._args import MissingArgException
from nvidia.dali.auto_aug.core._augmentation import Augmentation
import nvidia.dali.auto_aug.augmentations as a
def max_translate_hw(max_translate):
if isinstance(max_translate, (tuple, list)):
height, width = max_translate
return height, width
return max_translate, max_translate
def parse_validate_offset(use_shape, max_translate_abs=None, max_translate_rel=None,
default_translate_abs=250, default_translate_rel=1.):
    # if one passes a DataNode (with shapes, for instance), the error message would be very vague
if not isinstance(use_shape, bool):
raise Exception(f"The `use_shape` is a flag that should be set to either True or False, "
f"got {use_shape}.")
if use_shape:
if max_translate_abs is not None:
raise Exception("The argument `max_translate_abs` cannot be used with image shapes. "
"You may use `max_translate_rel` instead.")
if max_translate_rel is None:
max_translate_rel = default_translate_rel
return max_translate_hw(max_translate_rel)
else:
if max_translate_rel is not None:
raise Exception("The argument `max_translate_rel` cannot be used without image shapes. "
"You may use `max_translate_abs` instead.")
if max_translate_abs is None:
max_translate_abs = default_translate_abs
return max_translate_hw(max_translate_abs)
def get_translations(use_shape: bool, default_translate_abs: int, default_translate_rel: float,
max_translate_abs: Optional[int] = None,
max_translate_rel: Optional[float] = None):
max_translate_height, max_translate_width = parse_validate_offset(
use_shape, max_translate_abs=max_translate_abs, max_translate_rel=max_translate_rel,
default_translate_abs=default_translate_abs, default_translate_rel=default_translate_rel)
if use_shape:
translate_x = a.translate_x.augmentation((0, max_translate_width), True)
translate_y = a.translate_y.augmentation((0, max_translate_height), True)
return [translate_x, translate_y]
else:
translate_x = a.translate_x_no_shape.augmentation((0, max_translate_width), True)
translate_y = a.translate_y_no_shape.augmentation((0, max_translate_height), True)
return [translate_x, translate_y]
def pretty_select(augmentations: List[Augmentation], aug_ids: _DataNode, op_kwargs,
auto_aug_name: str, ref_suite_name: str):
try:
return select(augmentations, aug_ids, **op_kwargs)
except MissingArgException as e:
if e.missing_args != ['shape'] or e.augmentation.op not in [
a.translate_x.op, a.translate_y.op
]:
raise
else:
raise Exception(
f"The augmentation `{e.augmentation.name}` requires `shape` argument that "
f"describes image shape (in HWC layout). Please provide it as `shape` argument "
f"to `{auto_aug_name}` call. You can get the image shape from encoded "
f"images with `fn.peek_image_shape`. Alternatively, you can use "
f"`translate_x_no_shape`/`translate_y_no_shape` that does not rely on image "
f"shape, but uses offset from fixed range: for reference see `{ref_suite_name}` "
f"and its `use_shape` argument. ")
|
DALI-main
|
dali/python/nvidia/dali/auto_aug/core/_utils.py
|
# Copyright (c) 2017-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
import sys
import threading
import tree
import warnings
from itertools import count
import nvidia.dali.python_function_plugin
from nvidia.dali import backend as _b
from nvidia.dali import fn as _functional
from nvidia.dali import internal as _internal
from nvidia.dali.data_node import DataNode as _DataNode
from nvidia.dali.pipeline import Pipeline as _Pipeline
from nvidia.dali.types import (_type_name_convert_to_string, _type_convert_value, # noqa: F401
_default_converter, _vector_element_type, # noqa: F401
ScalarConstant as _ScalarConstant, Constant as _Constant)
from nvidia.dali import _conditionals
from nvidia.dali.ops import (_registry, _names, _docs, _operator_utils) # noqa: F401
# reexpose what was previously visible:
from nvidia.dali.ops._registry import (cpu_ops, mixed_ops, gpu_ops, register_cpu_op, # noqa: F401
register_gpu_op) # noqa: F401
from nvidia.dali.ops._names import (_op_name, _process_op_name, _schema_name)
from nvidia.dali.ops._operator_utils import (_build_input_sets, _repack_output_sets, )
class _OpCounter(object):
# pylint: disable=too-few-public-methods
_lock = threading.Lock()
_op_count = count(0)
def __init__(self):
with self._lock:
self._id = next(self._op_count)
@property
def id(self):
return self._id
def _instantiate_constant_node(constant: _ScalarConstant, device: str):
"""Generate a DataNode (creating a Constant operator) based on the provided ScalarConstant.
"""
return _Constant(device=device, value=constant.value, dtype=constant.dtype,
shape=constant.shape)
# TODO(klecki): The curse of multiple input sets and optimization prohibits us from using this
# code-path both for inputs and argument inputs.
def _handle_constant(value, device, input_name, op_name):
"""Handle promotion of possible constant value passed as (argument) input to an operator-backed
DataNode. Pass-through if the value is a DataNode.
Parameters
----------
value : DataNode, ScalarConstant or value convertible to a constant op
The value to be processed.
device : str
Target placement of constant node.
input_name : int or str
Position or name of the input, for error reporting purposes.
op_name : str
Name of the invoked operator, for error reporting purposes.
Returns
-------
DataNode
Either the same node as input or newly created DataNode representing the constant.
Raises
------
TypeError
Error in case a constant was passed that is not possible to be converted by DALI.
"""
if isinstance(value, _DataNode):
return value
if isinstance(value, _ScalarConstant):
return _instantiate_constant_node(value, device)
try:
value = _Constant(value, device=device)
except Exception as e:
raise TypeError(f"when calling operator {op_name}: "
f"expected inputs of type `DataNode` or convertible to "
f"constant nodes. Received input `{input_name}` of type "
f"'{type(value).__name__}'.") from e
return value
def _separate_kwargs(kwargs, arg_input_type=_DataNode):
"""Separates arguments into scalar arguments and argument inputs (data nodes or tensor lists),
that were historically specified in __init__ and __call__ of operator class.
Returns a pair of dictionaries of kwargs - the first for arguments (__init__), the second for
argument inputs (__call__).
Args:
kwargs: Keyword arguments.
arg_input_type: operator's argument input type, DataNode for pipeline mode, TensorListCPU
for eager mode.
"""
def is_arg_input_type(x):
return isinstance(x, arg_input_type)
def is_arg_input_or_name(name, value):
if name == "device":
return False
if name == "ndim":
return False
if name == "name" or is_arg_input_type(value):
return True
if isinstance(value, (str, list, tuple, nvidia.dali.types.ScalarConstant)):
return False
return not nvidia.dali.types._is_scalar_value(value)
def to_scalar(scalar):
return scalar.value if isinstance(scalar, nvidia.dali.types.ScalarConstant) else scalar
init_args = {}
call_args = {}
for name, value in kwargs.items():
if value is None:
continue
if is_arg_input_or_name(name, value):
call_args[name] = value
else:
init_args[name] = to_scalar(value)
return init_args, call_args
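# Editor's note (illustrative): given e.g.
#   kwargs = {"device": "cpu", "seed": 42, "angle": some_data_node, "name": "op0"}
# the first returned dict gets the plain scalars ("device", "seed"), while the
# second gets the argument inputs (DataNodes) and "name" ("angle", "name").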
def _handle_deprecations(schema, kwargs, op_name):
"""Handle deprecation of the named arguments (scalar arguments and argument inputs) specified
to the operator.
Based on the schema information the argument can be automatically renamed or dropped
with appropriate warnings being issued. Errors are raised if both the old and new name of
renamed argument are used.
Parameters
----------
schema : OpSchema
Schema for the operator containing the deprecation information.
kwargs : Dict
Dictionary containing the arguments.
op_name : str
Name of the invoked operator, for error reporting purposes.
Returns
-------
Dict
Dictionary with arguments rearranged
"""
arg_names = list(kwargs.keys())
for arg_name in arg_names:
if not schema.IsDeprecatedArg(arg_name):
continue
meta = schema.DeprecatedArgMeta(arg_name)
new_name = meta['renamed_to']
removed = meta['removed']
msg = meta['msg']
if new_name:
if new_name in kwargs:
raise TypeError(f"Operator {op_name} got an unexpected '{arg_name}' deprecated"
f" argument when '{new_name}' was already provided")
kwargs[new_name] = kwargs[arg_name]
del kwargs[arg_name]
elif removed:
del kwargs[arg_name]
with warnings.catch_warnings():
warnings.simplefilter("default")
warnings.warn(msg, DeprecationWarning, stacklevel=2)
return kwargs
def _add_spec_args(schema, spec, kwargs):
for key, value in kwargs.items():
if value is None:
# None is not a valid value for any argument type, so treat it
# as if the argument was not supplied at all
continue
dtype = schema.GetArgumentType(key)
if isinstance(value, (list, tuple)):
if len(value) == 0:
spec.AddArgEmptyList(key, _vector_element_type(dtype))
continue
converted_value = _type_convert_value(dtype, value)
spec.AddArg(key, converted_value)
class _OperatorInstance(object):
def __init__(self, inputs, op, **kwargs):
self._counter = _OpCounter()
self._outputs = []
self._op = op
self._default_call_args = op._call_args
self._spec = op.spec.copy()
self._relation_id = self._counter.id
if _conditionals.conditionals_enabled():
inputs, kwargs = _conditionals.apply_conditional_split_to_args(inputs, kwargs)
self._inputs = inputs
spec_args, kwargs = _separate_kwargs(kwargs)
_add_spec_args(op._schema, self._spec, spec_args)
call_args = {**self._default_call_args}
for k, v in kwargs.items():
if v is None:
# if an argument was specified in __init__ and in __call__ it is None, ignore it
continue
if k in self._default_call_args:
raise ValueError("The argument `{}` was already specified in __init__.".format(k))
call_args[k] = v
name = call_args.get("name", None)
if name is not None:
self._name = name
else:
self._name = '__' + type(op).__name__ + "_" + str(self._counter.id)
# Add inputs
if inputs:
for inp in inputs:
if not isinstance(inp, _DataNode):
                    raise TypeError(
                        f"Expected inputs of type `DataNode`. Received input of type "
                        f"'{type(inp).__name__}'.")
self._spec.AddInput(inp.name, inp.device)
# Argument inputs
for k in sorted(call_args.keys()):
if k not in ["name"]:
arg_inp = call_args[k]
if arg_inp is None:
continue
# Argument input constants are always placed on CPU
arg_inp = _handle_constant(arg_inp, "cpu", k, type(op).__name__)
_check_arg_input(op._schema, type(self._op).__name__, k)
self._spec.AddArgumentInput(k, arg_inp.name)
self._inputs = list(self._inputs) + [arg_inp]
if self._op.schema.IsDeprecated():
# TODO(klecki): how to know if this is fn or ops?
msg = "WARNING: `{}` is now deprecated".format(_op_name(type(self._op).__name__, "fn"))
use_instead = _op_name(self._op.schema.DeprecatedInFavorOf(), "fn")
if use_instead:
msg += ". Use `" + use_instead + "` instead."
explanation = self._op.schema.DeprecationMessage()
if explanation:
msg += "\n" + explanation
with warnings.catch_warnings():
warnings.simplefilter("default")
warnings.warn(msg, DeprecationWarning, stacklevel=2)
def check_args(self):
self._op.schema.CheckArgs(self._spec)
def generate_outputs(self):
pipeline = _Pipeline.current()
if pipeline is None and self._op.preserve:
_Pipeline._raise_pipeline_required("Operators with side-effects ")
# Add outputs
if self._op.device == "gpu" or self._op.device == "mixed":
output_device = "gpu"
else:
output_device = "cpu"
num_output = (self._op.schema.CalculateOutputs(self._spec)
+ self._op.schema.CalculateAdditionalOutputs(self._spec))
if num_output == 0 and self._op.preserve:
t_name = type(self._op).__name__ + "_id_" + str(self.id) + "_sink"
pipeline.add_sink(_DataNode(t_name, output_device, self))
return
for i in range(num_output):
t_name = self._name
if num_output > 1:
t_name += "[{}]".format(i)
t = _DataNode(t_name, output_device, self)
self._spec.AddOutput(t.name, t.device)
if self._op.preserve:
pipeline.add_sink(t)
self.append_output(t)
@property
def id(self):
return self._counter.id
@property
def inputs(self):
return self._inputs
@property
def outputs(self):
return self._outputs
@property
def unwrapped_outputs(self):
if len(self._outputs) == 1:
return self._outputs[0]
else:
return self._outputs
@property
def spec(self):
return self._spec
@property
def name(self):
return self._name
@property
def relation_id(self):
return self._relation_id
@relation_id.setter
def relation_id(self, value):
self._relation_id = value
def append_output(self, output):
self._outputs.append(output)
class _DaliOperatorMeta(type):
@property
def __doc__(self):
return _docs._docstring_generator(self)
def _check_arg_input(schema, op_name, name):
if name == "name":
return
if not schema.IsTensorArgument(name):
expected_type_name = _type_name_convert_to_string(schema.GetArgumentType(name), False)
raise TypeError(
f"The argument `{name}` for operator `{op_name}` should not be a `DataNode` but a "
f"{expected_type_name}")
def python_op_factory(name, schema_name=None):
class Operator(metaclass=_DaliOperatorMeta):
def __init__(self, *, device="cpu", **kwargs):
schema_name = _schema_name(type(self))
self._spec = _b.OpSpec(schema_name)
self._schema = _b.GetSchema(schema_name)
# Get the device argument. We will need this to determine
# the device that our outputs will be stored on
self._device = device
self._spec.AddArg("device", self._device)
kwargs, self._call_args = _separate_kwargs(kwargs)
for k in self._call_args.keys():
_check_arg_input(self._schema, type(self).__name__, k)
if "preserve" in kwargs.keys():
self._preserve = kwargs["preserve"]
# we don't want to set "preserve" arg twice
del kwargs["preserve"]
else:
self._preserve = False
self._spec.AddArg("preserve", self._preserve)
self._preserve = self._preserve or self._schema.IsNoPrune()
# Check for any deprecated arguments that should be replaced or removed
# TODO(klecki): Why can't we also handle deprecation of Argument Inputs?
kwargs = _handle_deprecations(self._schema, kwargs, type(self).__name__)
# Store the specified arguments
_add_spec_args(self._schema, self._spec, kwargs)
@property
def spec(self):
return self._spec
@property
def schema(self):
return self._schema
@property
def device(self):
return self._device
@property
def preserve(self):
return self._preserve
def __call__(self, *inputs, **kwargs):
self._check_schema_num_inputs(inputs)
inputs = _preprocess_inputs(inputs, self.__class__.__name__, self._device, self._schema)
input_sets = _build_input_sets(inputs, self.__class__.__name__)
# Create OperatorInstance for every input set
op_instances = []
for input_set in input_sets:
op_instances.append(_OperatorInstance(input_set, self, **kwargs))
op_instances[-1].generate_outputs()
# Tie the instances together
relation_id = op_instances[0].id
for op in op_instances:
op.relation_id = relation_id
# If we don't have multiple input sets, flatten the result
if len(op_instances) == 1:
result = op_instances[0].unwrapped_outputs
else:
outputs = []
for op in op_instances:
outputs.append(op.outputs)
result = _repack_output_sets(outputs)
if _conditionals.conditionals_enabled():
if len(op_instances) != 1:
raise ValueError("Multiple input sets are not supported with conditional"
" execution (when `enable_conditionals=True`)")
_conditionals.register_data_nodes(result, input_sets[0], kwargs)
return result
def _check_schema_num_inputs(self, inputs):
if len(inputs) < self._schema.MinNumInput() or len(inputs) > self._schema.MaxNumInput():
raise ValueError(
f"Operator {type(self).__name__} expects "
f"from {self._schema.MinNumInput()} to {self._schema.MaxNumInput()} inputs, "
f"but received {len(inputs)}.")
Operator.__name__ = str(name)
Operator.schema_name = schema_name or Operator.__name__
Operator.__call__.__doc__ = _docs._docstring_generator_call(Operator.schema_name)
return Operator
def _wrap_op(op_class, submodule=[], parent_module=None):
return _functional._wrap_op(op_class, submodule, parent_module,
_docs._docstring_generator_fn(op_class))
def _load_ops():
_registry._discover_ops()
_all_ops = _registry._all_registered_ops()
ops_module = sys.modules[__name__]
for op_reg_name in _all_ops:
# TODO(klecki): Make this a function: _add_op(op_reg_name) and invoke it immediately
# with register_xxx_op(). Now it relies on those class being present in this module
# at the time of registration.
schema = _b.TryGetSchema(op_reg_name)
make_hidden = schema.IsDocHidden() if schema else False
_, submodule, op_name = _process_op_name(op_reg_name, make_hidden)
module = _internal.get_submodule(ops_module, submodule)
if not hasattr(module, op_name):
op_class = python_op_factory(op_name, op_reg_name)
op_class.__module__ = module.__name__
setattr(module, op_name, op_class)
if op_name not in ["ExternalSource"]:
_wrap_op(op_class, submodule)
        # The operator was inserted into the nvidia.dali.ops.hidden module; import it here
        # so that it is usable, but not documented as coming from another module
if make_hidden:
parent_module = _internal.get_submodule(ops_module, submodule[:-1])
setattr(parent_module, op_name, op_class)
def Reload():
_load_ops()
def _load_readers_tfrecord():
"""After backend ops are loaded, load the TFRecord readers (if they are available).
"""
from nvidia.dali.ops._operators import tfrecord
if not tfrecord.tfrecord_enabled():
return
tfrecord._TFRecordReaderImpl.__call__.__doc__ = _docs._docstring_generator_call(
"readers__TFRecord")
_registry.register_cpu_op('readers__TFRecord')
_registry.register_cpu_op('TFRecordReader')
ops_module = sys.modules[__name__]
for op_reg_name, op_class in [('readers__TFRecord', tfrecord.TFRecord),
('TFRecordReader', tfrecord.TFRecordReader)]:
op_class.schema_name = op_reg_name
_, submodule, op_name = _process_op_name(op_reg_name)
module = _internal.get_submodule(ops_module, submodule)
if not hasattr(module, op_name):
op_class.__module__ = module.__name__
setattr(module, op_name, op_class)
_wrap_op(op_class, submodule)
def _choose_device(inputs):
for input in inputs:
if isinstance(input, (tuple, list)):
if any(getattr(inp, "device", None) == "gpu" for inp in input):
return "gpu"
elif getattr(input, "device", None) == "gpu":
return "gpu"
return "cpu"
def _preprocess_inputs(inputs, op_name, device, schema=None):
"""Promote all scalar values in the inputs tuple into operator-backed DataNodes.
    This operation needs to be performed first, so we can have fewer duplicated constant nodes
    when dealing with multiple input sets.
Parameters
----------
inputs : tuple
The inputs can contain one level nesting of Multiple Input Sets.
"""
if isinstance(inputs, tuple):
inputs = list(inputs)
def is_input(x):
if isinstance(x, (_DataNode, nvidia.dali.types.ScalarConstant)):
return True
# One level of nesting for Multiple Input Sets. It must be a List[DataNode/ScalarConstant]
# with at least one DataNode.
return (isinstance(x, (list))
and any(isinstance(y, _DataNode) for y in x)
and all(isinstance(y, (_DataNode, nvidia.dali.types.ScalarConstant)) for y in x))
def get_input_device(schema, input_idx):
default_input_device = "gpu" if device == "gpu" else "cpu"
if schema:
input_device = schema.GetInputDevice(input_idx) or default_input_device
else:
input_device = default_input_device
return input_device
def _promote_scalar_constant(value, input_device):
"""When ScalarConstant is encountered, promote it to a DataNode, otherwise do
a pass-through.
"""
if isinstance(value, _ScalarConstant):
return _instantiate_constant_node(value, input_device)
return value
for idx, inp in enumerate(inputs):
if not is_input(inp):
try:
inp = _Constant(inp, device=get_input_device(schema, idx))
except Exception as ex:
raise TypeError(f"when calling operator {op_name}: "
f"expected inputs of type `DataNode`, list of `DataNode` "
f"or convertible to constant nodes. Received "
f"input `{idx}` of type '{type(inp).__name__}'.") from ex
if not isinstance(inp, _DataNode):
dev = get_input_device(schema, idx)
# Process the single ScalarConstant or list possibly containing ScalarConstants
# and promote each of them into a DataNode
inp = tree.map_structure(lambda val: _promote_scalar_constant(val, dev), inp)
inputs[idx] = inp
return inputs
# This must go at the end - the purpose of these imports is to expose the operators in
# nvidia.dali.ops module
# Expose just the ExternalSource class, the fn.external_source is exposed by hand in
# appropriate module.
from nvidia.dali.external_source import ExternalSource # noqa: E402
ExternalSource.__module__ = __name__
# Expose the PythonFunction family of classes and generate the fn bindings for them
from nvidia.dali.ops._operators.python_function import ( # noqa: E402, F401
PythonFunctionBase, # noqa: F401
PythonFunction, DLTensorPythonFunction, _dlpack_to_array, # noqa: F401
_dlpack_from_array) # noqa: F401
_wrap_op(PythonFunction)
_wrap_op(DLTensorPythonFunction)
# Compose is only exposed for ops API, no fn bindings are generated
from nvidia.dali.ops._operators.compose import Compose # noqa: E402, F401
_registry.register_cpu_op('Compose')
_registry.register_gpu_op('Compose')
from nvidia.dali.ops._operators.math import (_arithm_op, _group_inputs, # noqa: E402, F401
_generate_input_desc) # noqa: F401
# Discover and generate bindings for all regular operators.
_load_ops()
# Load the TFRecord after the backend ops are processed, to wrap it conditionally, if it exists.
_load_readers_tfrecord()
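# Editor's example (a minimal sketch): the classes generated by
# `python_op_factory` form the ops API; scalar arguments may be given in
# __init__ and the remaining (per-call) arguments in __call__.
if __name__ == "__main__":
    from nvidia.dali.pipeline import Pipeline
    from nvidia.dali import ops

    pipe = Pipeline(batch_size=4, num_threads=2, device_id=0)
    with pipe:
        uniform = ops.random.Uniform(range=[0.0, 1.0])  # scalar arg in __init__
        pipe.set_outputs(uniform(shape=[2]))            # per-call arguments
    pipe.build()
    out, = pipe.run()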
|
DALI-main
|
dali/python/nvidia/dali/ops/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import backend as _b
# Registry of operator names for the given backends
_cpu_ops = set({})
_gpu_ops = set({})
_mixed_ops = set({})
def cpu_ops():
"""Get the set of the names of all registered CPU operators"""
return _cpu_ops
def gpu_ops():
"""Get the set of the names of all registered GPU operators"""
return _gpu_ops
def mixed_ops():
"""Get the set of the names of all registered Mixed operators"""
return _mixed_ops
def _all_registered_ops():
"""Return the set of the names of all registered operators"""
return _cpu_ops.union(_gpu_ops).union(_mixed_ops)
def register_cpu_op(name):
"""Add new CPU op name to the registry."""
global _cpu_ops
_cpu_ops = _cpu_ops.union({name})
def register_gpu_op(name):
"""Add new GPU op name to the registry"""
global _gpu_ops
_gpu_ops = _gpu_ops.union({name})
def _discover_ops():
"""Query the backend for all registered operator names, update the Python-side registry of
operator names."""
global _cpu_ops
global _gpu_ops
global _mixed_ops
_cpu_ops = _cpu_ops.union(set(_b.RegisteredCPUOps()))
_gpu_ops = _gpu_ops.union(set(_b.RegisteredGPUOps()))
_mixed_ops = _mixed_ops.union(set(_b.RegisteredMixedOps()))
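# Editor's example (a minimal sketch): the registry is a plain set of names per
# backend; "MyCustomOp" below is a made-up name.
if __name__ == "__main__":
    register_cpu_op("MyCustomOp")
    assert "MyCustomOp" in cpu_ops()
    assert "MyCustomOp" in _all_registered_ops()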
|
DALI-main
|
dali/python/nvidia/dali/ops/_registry.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
from nvidia.dali import backend as _b
from nvidia.dali.types import _default_converter, _type_name_convert_to_string
from nvidia.dali.ops import _registry, _names
def _numpydoc_formatter(name, type, doc, optional=False):
"""
    Format the documentation for a single argument; `name`, `type`, and `doc` are expected to be
    strings.
The formatting is:
<name> : <type>[, optional]
<doc>
"""
indent = "\n" + " " * 4
if optional:
type += ", optional"
return "`{}` : {}{}{}".format(name, type, indent, doc.replace("\n", indent))
def _get_inputs_doc(schema):
"""
Generate numpydoc-formatted docstring section for operator inputs (positional arguments)
based on the schema.
The inputs are represented in `Args` section using `_numpydoc_formatter`.
    If the schema provides names and docstrings for the inputs, they are used; otherwise,
    placeholder text indicating the supported number of inputs is used.
Note: The type of input is indicated as TensorList with supported layouts listed.
schema : OpSchema
Schema of the operator to be documented
"""
# Inputs section
if schema.MaxNumInput() == 0:
return ""
ret = """
Args
----
"""
if schema.HasInputDox():
for i in range(schema.MaxNumInput()):
optional = i >= schema.MinNumInput()
input_type_str = schema.GetInputType(i) + _supported_layouts_str(
schema.GetSupportedLayouts(i))
dox = schema.GetInputDox(i)
input_name = schema.GetInputName(i)
ret += _numpydoc_formatter(input_name, input_type_str, dox, optional) + "\n"
else:
for i in range(schema.MinNumInput()):
input_type_str = "TensorList" + _supported_layouts_str(schema.GetSupportedLayouts(i))
dox = "Input to the operator."
input_name = f"input{i}" if schema.MaxNumInput() > 1 else "input"
ret += _numpydoc_formatter(input_name, input_type_str, dox, False) + "\n"
extra_opt_args = schema.MaxNumInput() - schema.MinNumInput()
if extra_opt_args == 1:
i = schema.MinNumInput()
input_type_str = "TensorList" + _supported_layouts_str(schema.GetSupportedLayouts(i))
dox = "Input to the operator."
input_name = f"input{i}" if schema.MaxNumInput() > 1 else "input"
ret += _numpydoc_formatter(input_name, input_type_str, dox, True) + "\n"
elif extra_opt_args > 1:
input_type_str = "TensorList"
input_name = f"input[{schema.MinNumInput()}..{schema.MaxNumInput()-1}]"
dox = f"This function accepts up to {extra_opt_args} optional positional inputs"
ret += _numpydoc_formatter(input_name, input_type_str, dox, True) + "\n"
ret += "\n"
return ret
def _get_kwargs(schema):
"""
Get the numpydoc-formatted docstring section for keywords arguments.
schema : OpSchema
Schema of the operator to be documented
"""
ret = ""
for arg in schema.GetArgumentNames():
skip_full_doc = False
type_name = ""
dtype = None
doc = ""
deprecation_warning = None
if schema.IsDeprecatedArg(arg):
meta = schema.DeprecatedArgMeta(arg)
msg = meta['msg']
assert msg is not None
deprecation_warning = ".. warning::\n\n " + msg.replace("\n", "\n ")
renamed_arg = meta['renamed_to']
# Renamed and removed arguments won't show full documentation (only warning box)
skip_full_doc = renamed_arg or meta['removed']
# Renamed aliases are not fully registered to the schema, that's why we query for the
# info on the renamed_arg name.
if renamed_arg:
dtype = schema.GetArgumentType(renamed_arg)
type_name = _type_name_convert_to_string(
dtype, allow_tensors=schema.IsTensorArgument(renamed_arg))
# Try to get dtype only if not set already
# (renamed args go through a different path, see above)
if not dtype:
dtype = schema.GetArgumentType(arg)
type_name = _type_name_convert_to_string(dtype,
allow_tensors=schema.IsTensorArgument(arg))
# Add argument documentation if necessary
if not skip_full_doc:
if schema.IsArgumentOptional(arg):
type_name += ", optional"
if schema.HasArgumentDefaultValue(arg):
default_value_string = schema.GetArgumentDefaultValueString(arg)
default_value = ast.literal_eval(default_value_string)
type_name += ", default = `{}`".format(_default_converter(dtype, default_value))
doc += schema.GetArgumentDox(arg).rstrip("\n")
if schema.ArgSupportsPerFrameInput(arg):
doc += "\n\nSupports :func:`per-frame<nvidia.dali.fn.per_frame>` inputs."
if deprecation_warning:
doc += "\n\n" + deprecation_warning
elif deprecation_warning:
doc += deprecation_warning
ret += _numpydoc_formatter(arg, type_name, doc)
ret += '\n'
return ret
def _docstring_generator_main(cls, api):
"""
    Generate the docstring for the class, obtaining it from the schema based on cls.__name__.
    This lists all the keyword args that can be used when creating the operator.
"""
op_name = _names._schema_name(cls)
schema = _b.GetSchema(op_name)
ret = '\n'
if schema.IsDeprecated():
use_instead = _names._op_name(schema.DeprecatedInFavorOf(), api)
ret += ".. warning::\n\n This operator is now deprecated"
if use_instead:
ret += ". Use :meth:`" + use_instead + "` instead."
explanation = schema.DeprecationMessage()
if explanation:
indent = "\n" + " " * 3
ret += indent
ret += indent
explanation = explanation.replace("\n", indent)
ret += explanation
ret += "\n\n"
ret += schema.Dox()
ret += '\n'
if schema.IsDocPartiallyHidden():
return ret
supported_statements = []
if schema.IsSequenceOperator():
supported_statements.append("expects sequence inputs")
elif schema.AllowsSequences():
supported_statements.append("allows sequence inputs")
if schema.SupportsVolumetric():
supported_statements.append("supports volumetric data")
if len(supported_statements) > 0:
ret += "\nThis operator "
ret += supported_statements[0]
if len(supported_statements) > 1:
ret += " and " + supported_statements[1]
ret += ".\n"
if schema.IsNoPrune():
ret += "\nThis operator will **not** be optimized out of the graph.\n"
op_dev = []
if op_name in _registry.cpu_ops():
op_dev.append("'cpu'")
if op_name in _registry.gpu_ops():
op_dev.append("'gpu'")
if op_name in _registry.mixed_ops():
op_dev.append("'mixed'")
ret += """
Supported backends
"""
for dev in op_dev:
ret += " * " + dev + "\n"
ret += "\n"
return ret
def _docstring_generator(cls):
op_name = _names._schema_name(cls)
schema = _b.GetSchema(op_name)
ret = _docstring_generator_main(cls, "ops")
if schema.IsDocPartiallyHidden():
return ret
ret += """
Keyword args
------------
"""
ret += _get_kwargs(schema)
return ret
def _supported_layouts_str(supported_layouts):
if len(supported_layouts) == 0:
return ""
return " (" + ", ".join(["\'" + str(layout) + "\'" for layout in supported_layouts]) + ")"
def _docstring_prefix_from_inputs(op_name):
"""
    Generate the start of the docstring for `__call__` of operator `op_name`,
    assuming that docstrings were provided for all inputs separately.
    Returns the signature of `__call__` and the list of `Args` in the appropriate section.
"""
schema = _b.GetSchema(op_name)
# Signature
ret = "__call__(" + schema.GetCallSignatureInputs() + ", **kwargs)\n"
# __call__ docstring
ret += "\nOperator call to be used in graph definition.\n"
# Args section
ret += _get_inputs_doc(schema)
return ret
def _docstring_prefix_auto(op_name):
"""
    Generate the start of the docstring for `__call__` of operator `op_name`
    with default values. Assumes there will be 0 or 1 inputs.
"""
schema = _b.GetSchema(op_name)
if schema.MaxNumInput() == 0:
return """__call__(**kwargs)
Operator call to be used in graph definition. This operator doesn't have any inputs.
"""
elif schema.MaxNumInput() == 1:
ret = """__call__(data, **kwargs)
Operator call to be used in graph definition.
Args
----
"""
dox = "Input to the operator.\n"
fmt = "TensorList" + _supported_layouts_str(schema.GetSupportedLayouts(0))
ret += _numpydoc_formatter("data", fmt, dox, optional=False)
return ret
return ""
def _docstring_generator_call(op_name):
"""
Generate full docstring for `__call__` of Operator `op_name`.
"""
schema = _b.GetSchema(op_name)
if schema.IsDocPartiallyHidden():
return ""
if schema.HasCallDox():
ret = schema.GetCallDox()
elif schema.HasInputDox():
ret = _docstring_prefix_from_inputs(op_name)
elif schema.CanUseAutoInputDox():
ret = _docstring_prefix_auto(op_name)
else:
op_full_name, _, _ = _names._process_op_name(op_name)
ret = "See :meth:`nvidia.dali.ops." + op_full_name + "` class for complete information.\n"
if schema.AppendKwargsSection():
# Kwargs section
tensor_kwargs = _get_kwargs(schema)
if tensor_kwargs:
ret += """
Keyword Args
------------
"""
ret += tensor_kwargs
return ret
def _docstring_generator_fn(cls):
op_name = _names._schema_name(cls)
schema = _b.GetSchema(op_name)
ret = _docstring_generator_main(cls, "fn")
if schema.IsDocPartiallyHidden():
return ret
ret += _get_inputs_doc(schema)
ret += """
Keyword args
------------
"""
ret += _get_kwargs(schema)
return ret
|
DALI-main
|
dali/python/nvidia/dali/ops/_docs.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _repack_list(sets, fn):
"""Repack list from [[a, b, c], [a', b', c'], ....]
to [fn(a, a', ...), fn(b, b', ...), fn(c, c', ...)]
where fn can be `tuple` or `list`
Assume that all elements of input have the same length
"""
output_list = []
arg_list_len = len(sets[0])
for i in range(arg_list_len):
output_list.append(fn(input_set[i] for input_set in sets))
return output_list
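# Illustrative sanity check (not part of the original module): two output sets of three
# elements each are transposed into three tuples.
assert _repack_list([["a", "b", "c"], ["a2", "b2", "c2"]], tuple) == \
    [("a", "a2"), ("b", "b2"), ("c", "c2")]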
def _repack_output_sets(outputs):
"""Repack and "transpose" the output sets, from groups of outputs of individual operators
to interleaved groups of consecutive outputs, that is from:
[[out0, out1, out2], [out0', out1', out2'], ...] produce:
[[out0, out0', ...], [out1, out1', ...], [out2, out2', ...]]
    Assumes that all elements of the input have the same length.
    If the inputs were 1-element lists, the output is flattened, that is:
    [[out0], [out0'], [out0''], ...] -> [out0, out0', out0'', ...]
"""
if len(outputs) > 1 and len(outputs[0]) == 1:
output = []
for elem in outputs:
output.append(elem[0])
return output
return _repack_list(outputs, list)
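# Illustrative sanity check (not part of the original module): single-output groups are
# flattened, multi-output groups are transposed.
assert _repack_output_sets([["out0"], ["out0'"]]) == ["out0", "out0'"]
assert _repack_output_sets([["out0", "out1"], ["out0'", "out1'"]]) == \
    [["out0", "out0'"], ["out1", "out1'"]]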
def _build_input_sets(inputs, op_name):
"""Detect if the list of positional inputs [Inp_0, Inp_1, Inp_2, ...], represents Multiple
Input Sets (MIS) to operator and prepare lists of regular DataNode-only positional inputs to
individual operator instances.
If all Inp_i are DataNodes there are no MIS involved.
If any of Inp_i is a list of DataNodes, this is considered a MIS. In that case, non-list
Inp_i is repeated to match the length of the one that is a list, and those lists are regrouped,
for example:
inputs = [a, b, [x, y, z], [u, v, w]]
# "a" and "b" are repeated to match the length of [x, y, z]:
-> [[a, a, a], [b, b, b], [x, y, z], [u, v, w]]
    # input sets are rearranged, so they form regular tuples of DataNodes suitable for passing
    # to one operator instance.
    -> [(a, b, x, u), (a, b, y, v), (a, b, z, w)]
    This allows creating 3 operator instances, each with 4 positional inputs.
Parameters
----------
inputs : List of positional inputs
The inputs are either DataNodes or lists of DataNodes indicating MIS.
op_name : str
Name of the invoked operator, for error reporting purposes.
"""
def _detect_multiple_input_sets(inputs):
"""Check if any of inputs is a list, indicating a usage of MIS."""
return any(isinstance(input, list) for input in inputs)
def _safe_len(input):
if isinstance(input, list):
return len(input)
else:
return 1
def _check_common_length(inputs):
"""Check if all list representing multiple input sets have the same length and return it"""
arg_list_len = max(_safe_len(input) for input in inputs)
for input in inputs:
if isinstance(input, list):
if len(input) != arg_list_len:
raise ValueError(f"All argument lists for Multiple Input Sets used "
f"with operator {op_name} must have "
f"the same length")
return arg_list_len
def _unify_lists(inputs, arg_list_len):
"""Pack single _DataNodes into lists, so they are treated as Multiple Input Sets
consistently with the ones already present
Parameters
----------
arg_list_len : int
Number of MIS.
"""
result = ()
for input in inputs:
if isinstance(input, list):
result = result + (input, )
else:
result = result + ([input] * arg_list_len, )
return result
def _repack_input_sets(inputs):
"""Zip the list from [[arg0, arg0', arg0''], [arg1', arg1'', arg1''], ...]
to [(arg0, arg1, ...), (arg0', arg1', ...), (arg0'', arg1'', ...)]
"""
return _repack_list(inputs, tuple)
input_sets = []
if _detect_multiple_input_sets(inputs):
arg_list_len = _check_common_length(inputs)
packed_inputs = _unify_lists(inputs, arg_list_len)
input_sets = _repack_input_sets(packed_inputs)
else:
input_sets = [inputs]
return input_sets
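# Illustrative sanity check (not part of the original module): strings stand in for
# DataNodes. The plain input "a" is repeated to match the 3-element input set and the
# result is one positional-argument tuple per operator instance.
assert _build_input_sets(["a", ["x", "y", "z"]], "Demo") == \
    [("a", "x"), ("a", "y"), ("a", "z")]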
|
DALI-main
|
dali/python/nvidia/dali/ops/_operator_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import fn as _functional
def _schema_name(cls):
"""Extract the name of the schema from Operator class."""
return getattr(cls, 'schema_name', cls.__name__)
def _process_op_name(op_schema_name, make_hidden=False):
"""Based on the schema name (for example "Resize" or "experimental__readers__Video")
transform it into Python-compatible module & operator name information.
Parameters
----------
op_schema_name : str
The name of the schema
make_hidden : bool, optional
        Whether to add a .hidden submodule to the module path to indicate an internal operator
        that is later reimported but not directly discoverable, by default False
Returns
-------
(str, list, str)
(Full name with all submodules, submodule path to the operator, name of the operator),
for example:
("Resize", [], "Resize") or
("experimental.readers.Video", ["experimental", "readers"], "Video")
"""
# Two underscores (reasoning: we might want to have single underscores in the namespace itself)
namespace_delim = "__"
op_full_name = op_schema_name.replace(namespace_delim, '.')
*submodule, op_name = op_full_name.split('.')
if make_hidden:
submodule = [*submodule, 'hidden']
return op_full_name, submodule, op_name
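# Illustrative sanity check (not part of the original module): double underscores in
# schema names map to submodule boundaries.
assert _process_op_name("Resize") == ("Resize", [], "Resize")
assert _process_op_name("experimental__readers__Video") == \
    ("experimental.readers.Video", ["experimental", "readers"], "Video")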
def _op_name(op_schema_name, api="fn"):
"""Extract the name of the operator from the schema and return it transformed for given API:
CamelCase for "ops" API, and snake_case for "fn" API. The name contains full module path,
for example:
_op_name("experimental__readers__VideoResize", "fn") -> "experimental.readers.video_resize"
Parameters
----------
op_schema_name : str
The name of the schema
api : str, optional
API type, "ops" or "fn", by default "fn"
Returns
-------
str
The fully qualified name in given API
"""
full_name, submodule, op_name = _process_op_name(op_schema_name)
if api == "fn":
return ".".join([*submodule, _functional._to_snake_case(op_name)])
elif api == "ops":
return full_name
else:
        raise ValueError(f"'{api}' is not a valid DALI api name, try one of 'fn', 'ops'")
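# Illustrative examples (kept as comments, since they rely on fn._to_snake_case from the
# runtime module):
#   _op_name("experimental__readers__VideoResize", "fn")  -> "experimental.readers.video_resize"
#   _op_name("experimental__readers__VideoResize", "ops") -> "experimental.readers.VideoResize"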
|
DALI-main
|
dali/python/nvidia/dali/ops/_names.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali.python_function_plugin
from nvidia.dali import backend as _b
from nvidia.dali import _conditionals
from nvidia.dali import ops
from nvidia.dali.ops import _registry
from nvidia.dali.data_node import DataNode as _DataNode
from nvidia.dali.pipeline import Pipeline as _Pipeline
from nvidia.dali.types import (CUDAStream as _CUDAStream)
cupy = None
def _setup_cupy():
global cupy
if cupy is None:
import cupy as cupy
class PythonFunctionBase(metaclass=ops._DaliOperatorMeta):
def __init__(self, impl_name, function, num_outputs=1, device='cpu', **kwargs):
self._schema = _b.GetSchema(impl_name)
self._spec = _b.OpSpec(impl_name)
self._device = device
self._impl_name = impl_name
kwargs, self._call_args = ops._separate_kwargs(kwargs)
for key, value in kwargs.items():
self._spec.AddArg(key, value)
self.function = function
self.num_outputs = num_outputs
self._preserve = True
@property
def spec(self):
return self._spec
@property
def schema(self):
return self._schema
@property
def device(self):
return self._device
@property
def preserve(self):
return self._preserve
def __call__(self, *inputs, **kwargs):
inputs = ops._preprocess_inputs(inputs, self._impl_name, self._device, None)
pipeline = _Pipeline.current()
if pipeline is None:
_Pipeline._raise_pipeline_required("PythonFunction operator")
if (len(inputs) > self._schema.MaxNumInput() or len(inputs) < self._schema.MinNumInput()):
raise ValueError(
f"Operator {type(self).__name__} expects "
f"from {self._schema.MinNumInput()} to {self._schema.MaxNumInput()} inputs, "
f"but received {len(inputs)}.")
for inp in inputs:
if not isinstance(inp, _DataNode):
raise TypeError(f"Expected inputs of type `DataNode`. "
f"Received input of type '{type(inp).__name__}'. "
f"Python Operators do not support Multiple Input Sets.")
op_instance = ops._OperatorInstance(inputs, self, **kwargs)
op_instance.spec.AddArg("function_id", id(self.function))
op_instance.spec.AddArg("num_outputs", self.num_outputs)
op_instance.spec.AddArg("device", self.device)
if self.num_outputs == 0:
t_name = self._impl_name + "_id_" + str(op_instance.id) + "_sink"
t = _DataNode(t_name, self._device, op_instance)
pipeline.add_sink(t)
return
outputs = []
for i in range(self.num_outputs):
t_name = op_instance._name
if self.num_outputs > 1:
t_name += "[{}]".format(i)
t = _DataNode(t_name, self._device, op_instance)
op_instance.spec.AddOutput(t.name, t.device)
op_instance.append_output(t)
pipeline.add_sink(t)
outputs.append(t)
if _conditionals.conditionals_enabled():
_conditionals.register_data_nodes(outputs, inputs, kwargs)
return outputs[0] if len(outputs) == 1 else outputs
def _dlpack_to_array(dlpack):
return nvidia.dali.python_function_plugin.DLTensorToArray(dlpack)
def _dlpack_from_array(array):
return nvidia.dali.python_function_plugin.ArrayToDLTensor(array)
class PythonFunction(PythonFunctionBase):
schema_name = "PythonFunction"
_registry.register_cpu_op('PythonFunction')
_registry.register_gpu_op('PythonFunction')
@staticmethod
def current_stream():
"""Gets DALI's current CUDA stream."""
return _CUDAStream(nvidia.dali.python_function_plugin.current_dali_stream())
@staticmethod
def check_outputs(outputs, num_outputs):
if num_outputs > 1:
            if not isinstance(outputs, tuple):
                raise TypeError(
                    f"The output from a multi-output Python function operator "
                    f"must be a tuple, got: {type(outputs)}")
            if len(outputs) != num_outputs:
                raise ValueError(f"Unexpected number of outputs from a Python "
                                 f"function operator - got {len(outputs)}, expected {num_outputs}")
@staticmethod
def function_wrapper_per_sample(function, num_outputs, from_dlpack, to_dlpack, *dlpack_inputs):
arrays = [from_dlpack(dlpack) for dlpack in dlpack_inputs]
arr_out = function(*arrays)
if arr_out is None:
return
PythonFunction.check_outputs(arr_out, num_outputs)
if isinstance(arr_out, tuple):
return tuple(map(lambda t: to_dlpack(t), arr_out))
else:
return to_dlpack(arr_out)
@staticmethod
def function_wrapper_batch(function, num_outputs, from_dlpack, to_dlpack, *dlpack_inputs):
arrays = [[from_dlpack(dlpack) for dlpack in dl_input] for dl_input in dlpack_inputs]
arr_outs = function(*arrays)
if arr_outs is None:
return
def convert_batch(batch):
if isinstance(batch, list):
return [to_dlpack(x) for x in batch]
else:
return to_dlpack(batch)
PythonFunction.check_outputs(arr_outs, num_outputs)
if isinstance(arr_outs, tuple):
return tuple(convert_batch(x) for x in arr_outs)
else:
return convert_batch(arr_outs)
@staticmethod
def _function_wrapper_cpu(batch_processing, function, num_outputs, *dlpack_inputs):
if batch_processing:
return PythonFunction.function_wrapper_batch(
function,
num_outputs,
_dlpack_to_array,
_dlpack_from_array,
*dlpack_inputs)
else:
return PythonFunction.function_wrapper_per_sample(
function,
num_outputs,
_dlpack_to_array,
_dlpack_from_array,
*dlpack_inputs)
@staticmethod
def _cupy_stream_wrapper(function, *inputs):
stream = cupy.cuda.Stream(null=True)
stream.ptr = PythonFunction.current_stream().ptr
with stream:
out = function(*inputs)
stream.ptr = 0
return out
@staticmethod
def _function_wrapper_gpu(batch_processing, function, num_outputs, *dlpack_inputs):
def wrapped_func(*inputs):
return PythonFunction._cupy_stream_wrapper(function, *inputs)
if batch_processing:
return PythonFunction.function_wrapper_batch(wrapped_func, num_outputs, cupy.fromDlpack,
lambda t: t.toDlpack(), *dlpack_inputs)
else:
return PythonFunction.function_wrapper_per_sample(wrapped_func, num_outputs,
cupy.fromDlpack,
lambda t: t.toDlpack(),
*dlpack_inputs)
def __init__(self, function, num_outputs=1, device='cpu', batch_processing=False, **kwargs):
if device == 'gpu':
_setup_cupy()
if device == 'cpu':
def func(*ts):
return PythonFunction._function_wrapper_cpu(
batch_processing, function, num_outputs, *ts)
else:
def func(*ts):
return PythonFunction._function_wrapper_gpu(
batch_processing, function, num_outputs, *ts)
super(PythonFunction,
self).__init__(
impl_name="DLTensorPythonFunctionImpl",
function=func,
num_outputs=num_outputs,
device=device,
synchronize_stream=False,
batch_processing=batch_processing,
**kwargs)
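# A minimal usage sketch (kept as a comment because it needs a built DALI backend;
# fn.python_function is the functional counterpart of this class):
#
#   import numpy as np
#   import nvidia.dali.fn as fn
#
#   def flip_horizontal(sample):  # called with one array per sample
#       return np.ascontiguousarray(sample[:, ::-1, :])
#
#   # inside a pipeline definition:
#   # flipped = fn.python_function(images, function=flip_horizontal, num_outputs=1)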
class DLTensorPythonFunction(PythonFunctionBase):
schema_name = "DLTensorPythonFunction"
_registry.register_cpu_op('DLTensorPythonFunction')
_registry.register_gpu_op('DLTensorPythonFunction')
@staticmethod
def _function_wrapper_dlpack(batch_processing, function, num_outputs, *dlpack_inputs):
if batch_processing:
return PythonFunction.function_wrapper_batch(function,
num_outputs,
lambda x: x,
lambda x: x,
*dlpack_inputs)
else:
return PythonFunction.function_wrapper_per_sample(function,
num_outputs,
lambda x: x,
lambda x: x,
*dlpack_inputs)
def __init__(self, function, num_outputs=1, device='cpu', synchronize_stream=True,
batch_processing=True, **kwargs):
def func(*ts):
return DLTensorPythonFunction._function_wrapper_dlpack(
batch_processing, function, num_outputs, *ts)
super(DLTensorPythonFunction,
self).__init__(impl_name="DLTensorPythonFunctionImpl",
function=func,
num_outputs=num_outputs,
device=device,
synchronize_stream=synchronize_stream,
batch_processing=batch_processing,
**kwargs)
|
DALI-main
|
dali/python/nvidia/dali/ops/_operators/python_function.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.data_node import DataNode as _DataNode
class _CompoundOp:
def __init__(self, op_list):
self._ops = []
for op in op_list:
if isinstance(op, _CompoundOp):
self._ops += op._ops
else:
self._ops.append(op)
def __call__(self, *inputs, **kwargs):
inputs = list(inputs)
for op in self._ops:
for i in range(len(inputs)):
if inputs[i].device == "cpu" and op.device == "gpu" and op.schema.GetInputDevice(
i) != "cpu":
inputs[i] = inputs[i].gpu()
inputs = op(*inputs, **kwargs)
kwargs = {}
if isinstance(inputs, tuple):
inputs = list(inputs)
if isinstance(inputs, _DataNode):
inputs = [inputs]
return inputs[0] if len(inputs) == 1 else inputs
def Compose(op_list):
"""Returns a meta-operator that chains the operations in op_list.
    The return value is a callable object which, when called, performs::
        op_list[n-1](op_list[n-2](... op_list[0](args)))
    Operators can be composed only when all outputs of the previous operator can be processed
    directly by the next operator in the list.
    The example below chains an image decoder and a Resize operation with random square size.
    The ``decode_and_resize`` object can be called as if it was an operator::
        decode_and_resize = ops.Compose([
            ops.decoders.Image(device="cpu"),
            ops.Resize(size=fn.random.uniform(range=(400, 500)), device="gpu")
        ])
        files, labels = fn.readers.caffe(path=caffe_db_folder, seed=1)
        pipe.set_outputs(decode_and_resize(files), labels)
If there's a transition from CPU to GPU in the middle of the ``op_list``, as is the case in this
example, ``Compose`` automatically arranges copying the data to GPU memory.
.. note::
This is an experimental feature, subject to change without notice.
"""
return op_list[0] if len(op_list) == 1 else _CompoundOp(op_list)
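# A minimal sketch of using Compose inside a pipeline definition (kept as a comment
# because it needs a built DALI backend; the file_root path is hypothetical):
#
#   from nvidia.dali import pipeline_def, fn, ops
#
#   @pipeline_def(batch_size=8, num_threads=2, device_id=0)
#   def my_pipe():
#       jpegs, labels = fn.readers.file(file_root="/data/images")  # hypothetical path
#       decode_resize = ops.Compose([
#           ops.decoders.Image(device="cpu"),
#           ops.Resize(device="gpu", size=(224, 224)),
#       ])
#       return decode_resize(jpegs), labels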
|
DALI-main
|
dali/python/nvidia/dali/ops/_operators/compose.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module dedicated for keeping internal implementation of Python wrappers and extensions
over the automatically generated operator bindings.
Some operators provide additional functionality, for example the dictionary output in the case
of the TFRecord reader.
Typically, each such operator should be:
1. implemented in this module,
2. registered by name using one of the nvidia.dali.ops._registry.register_xxx_op() functions,
3. reimported into the nvidia.dali.ops module,
4. reexposed in the fn API via a _wrap_op call.
"""
|
DALI-main
|
dali/python/nvidia/dali/ops/_operators/__init__.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import backend as _b
from nvidia.dali import _conditionals
from nvidia.dali import ops
from nvidia.dali.data_node import DataNode as _DataNode
_internal_schemas = ['_TFRecordReader', 'readers___TFRecord']
def tfrecord_enabled():
"""Check if the TFRecord Reader op is enabled by looking up if the internal implementation
was registered in the backend.
This call is valid after the backend ops were discovered (_load_ops() was called).
"""
for internal_schema in _internal_schemas:
if _b.TryGetSchema(internal_schema) is not None:
return True
return False
class _TFRecordReaderImpl():
""" custom wrappers around ops """
def __init__(self, path, index_path, features, **kwargs):
if isinstance(path, list):
self._path = path
else:
self._path = [path]
if isinstance(index_path, list):
self._index_path = index_path
else:
self._index_path = [index_path]
self._schema = _b.GetSchema(self._internal_schema_name)
self._spec = _b.OpSpec(self._internal_schema_name)
self._device = "cpu"
self._spec.AddArg("path", self._path)
self._spec.AddArg("index_path", self._index_path)
kwargs, self._call_args = ops._separate_kwargs(kwargs)
for key, value in kwargs.items():
self._spec.AddArg(key, value)
self._features = features
@property
def spec(self):
return self._spec
@property
def schema(self):
return self._schema
@property
def device(self):
return self._device
def __call__(self, *inputs, **kwargs):
# We do not handle multiple input sets for Reader as they do not have inputs
if (len(inputs) > self._schema.MaxNumInput() or len(inputs) < self._schema.MinNumInput()):
raise ValueError(
f"Operator {type(self).__name__} expects "
f"from {self._schema.MinNumInput()} to {self._schema.MaxNumInput()} inputs, "
f"but received {len(inputs)}.")
op_instance = ops._OperatorInstance(inputs, self, **kwargs)
outputs = {}
feature_names = []
features = []
for i, (feature_name, feature) in enumerate(self._features.items()):
t_name = op_instance._name
if len(self._features.items()) > 1:
t_name += "[{}]".format(i)
t = _DataNode(t_name, self._device, op_instance)
op_instance.spec.AddOutput(t.name, t.device)
op_instance.append_output(t)
outputs[feature_name] = t
feature_names.append(feature_name)
features.append(feature)
# We know this reader doesn't have any inputs
if _conditionals.conditionals_enabled():
_conditionals.register_data_nodes(list(outputs.values()))
op_instance.spec.AddArg("feature_names", feature_names)
op_instance.spec.AddArg("features", features)
return outputs
class TFRecordReader(_TFRecordReaderImpl, metaclass=ops._DaliOperatorMeta):
_internal_schema_name = '_TFRecordReader'
class TFRecord(_TFRecordReaderImpl, metaclass=ops._DaliOperatorMeta):
_internal_schema_name = 'readers___TFRecord'
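# A minimal usage sketch (kept as a comment because it needs DALI built with TFRecord
# support; the paths are hypothetical). The reader returns a dictionary of DataNodes
# keyed by feature name instead of a plain tuple:
#
#   import nvidia.dali.fn as fn
#   import nvidia.dali.tfrecord as tfrec
#
#   inputs = fn.readers.tfrecord(
#       path="/data/train.tfrecord",   # hypothetical
#       index_path="/data/train.idx",  # hypothetical
#       features={
#           "image/encoded": tfrec.FixedLenFeature((), tfrec.string, ""),
#           "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1),
#       })
#   images = inputs["image/encoded"]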
|
DALI-main
|
dali/python/nvidia/dali/ops/_operators/tfrecord.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import _conditionals
from nvidia.dali.data_node import DataNode as _DataNode
from nvidia.dali.types import (DALIDataType as _DALIDataType, Constant as _Constant, ScalarConstant
as _ScalarConstant, _bool_types, _int_like_types, _float_types)
def _is_boolean_like(input):
"""
Check if scalar constant input provided to arithmetic operator is a boolean constant.
Parameters
----------
input :
Input representing scalar constant (not a DataNode or a tensor constant like np.array)
"""
if type(input) is bool:
return True
if isinstance(input, _ScalarConstant):
if input.dtype in _bool_types:
return True
return False
def _is_integer_like(input):
"""
Check if scalar constant input provided to arithmetic operator is an integer constant.
    Boolean and integer types are considered integer-like.
Parameters
----------
input :
Input representing scalar constant (not a DataNode or a tensor constant like np.array)
"""
if _is_boolean_like(input):
return True
if type(input) is int:
return True
if isinstance(input, _ScalarConstant):
if input.dtype in _int_like_types:
return True
return False
def _is_real_like(input):
"""
Check if scalar constant input provided to arithmetic operator is a floating point constant.
Parameters
----------
input :
Input representing scalar constant (not a DataNode or a tensor constant like np.array)
"""
if type(input) is float:
return True
if isinstance(input, _ScalarConstant):
if input.dtype in _float_types:
return True
return False
def _to_type_desc(input):
"""
    Generate the <type> description required by ArithmeticGenericOp for use with scalar constants.
"""
if type(input) is bool:
return "bool"
if type(input) is int:
return "int32"
if type(input) is float:
return "float32" # TODO(klecki): current DALI limitation
if isinstance(input, _ScalarConstant):
dtype_to_desc = {
_DALIDataType.BOOL: "bool",
_DALIDataType.INT8: "int8",
_DALIDataType.INT16: "int16",
_DALIDataType.INT32: "int32",
_DALIDataType.INT64: "int64",
_DALIDataType.UINT8: "uint8",
_DALIDataType.UINT16: "uint16",
_DALIDataType.UINT32: "uint32",
_DALIDataType.UINT64: "uint64",
_DALIDataType.FLOAT16: "float16",
_DALIDataType.FLOAT: "float32",
_DALIDataType.FLOAT64: "float64",
}
return dtype_to_desc[input.dtype]
raise TypeError(
f"Constant argument to arithmetic operation not supported. "
f"Got {str(type(input))}, expected "
f"a constant value of type 'bool', 'int', 'float' or 'nvidia.dali.types.Constant'.")
# Group inputs into categories_idxs, edges of type ``edge_type``,
# integer constants and real constants.
# The categories_idxs is a list that for an input `i` contains a tuple:
# (category of ith input, index of ith input in appropriate category)
def _group_inputs(inputs, edge_type=_DataNode):
"""
Group inputs into three groups:
* edges of type ``edge_type`` - those are actual inputs like DataNode,
* integer constants,
* real constants.
Generate `categories_idxs` mapping, that is a list that for an input `i` contains a tuple:
(category of ith input, index of ith input in appropriate category)
Parameters
----------
inputs :
All arguments that were passed to the arithmetic operators
edge_type :
What should be considered an input, _DataNode or a TensorList (used for debug and eager
modes), by default _DataNode
Returns
-------
    (`categories_idxs`, edges, integer constants, real constants)
        The mapping of inputs into categories, followed by the three category lists;
        the constant categories are None when empty.
"""
categories_idxs = []
edges = []
integers = []
reals = []
for input in inputs:
if not isinstance(input, (edge_type, _ScalarConstant, int, float)):
input = _Constant(input)
if isinstance(input, edge_type):
categories_idxs.append(("edge", len(edges)))
edges.append(input)
elif _is_integer_like(input):
categories_idxs.append(("integer", len(integers)))
integers.append(input)
elif _is_real_like(input):
categories_idxs.append(("real", len(reals)))
reals.append(input)
else:
raise TypeError(f"Argument to arithmetic operation not supported."
f"Got {str(type(input))}, expected a return value from other"
f"DALI Operator or a constant value of type 'bool', 'int', "
f"'float' or 'nvidia.dali.types.Constant'.")
if len(integers) == 0:
integers = None
if len(reals) == 0:
reals = None
return (categories_idxs, edges, integers, reals)
def _generate_input_desc(categories_idx, integers, reals):
"""
Generate the list of <input> subexpression as specified
by grammar for ArithmeticGenericOp
"""
input_desc = ""
for i, (category, idx) in enumerate(categories_idx):
if category == "edge":
input_desc += "&{}".format(idx)
elif category == "integer":
input_desc += "${}:{}".format(idx, _to_type_desc(integers[idx]))
elif category == "real":
input_desc += "${}:{}".format(idx, _to_type_desc(reals[idx]))
if i < len(categories_idx) - 1:
input_desc += " "
return input_desc
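# Illustrative sanity check (not part of the original module): a descriptor for an
# expression whose first argument is data edge 0 and whose second argument is integer
# constant 0 (an int32 scalar).
assert _generate_input_desc([("edge", 0), ("integer", 0)], [42], None) == "&0 $0:int32"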
def _arithm_op(name, *inputs):
"""
Create arguments for ArithmeticGenericOp and call it with supplied inputs.
Select the `gpu` device if at least one of the inputs is `gpu`, otherwise `cpu`.
"""
import nvidia.dali.ops # Allow for late binding of the ArithmeticGenericOp from parent module.
categories_idxs, edges, integers, reals = _group_inputs(inputs)
input_desc = _generate_input_desc(categories_idxs, integers, reals)
expression_desc = "{}({})".format(name, input_desc)
dev = nvidia.dali.ops._choose_device(edges)
# Create "instance" of operator
op = nvidia.dali.ops.ArithmeticGenericOp(device=dev, expression_desc=expression_desc,
integer_constants=integers, real_constants=reals)
# If we are on gpu, we must mark all inputs as gpu
if dev == "gpu":
dev_inputs = list(edge.gpu() for edge in edges)
else:
dev_inputs = edges
# Call it immediately
result = op(*dev_inputs)
if _conditionals.conditionals_enabled():
_conditionals.register_data_nodes(result, dev_inputs)
return result
|
DALI-main
|
dali/python/nvidia/dali/ops/_operators/math.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conversion of eager-style Python into user-customized graph code based on
TensorFlow conversion.
AutoGraph transforms a subset of Python which operates on user-defined objects
into equivalent user-defined graph code. When executing the graph, it has the same
effect as if you ran the original code in eager mode. This AutoGraph fork introduces
customization points for the detection of user-defined objects and operator overloads.
The customization point can be controlled by inheriting from OperatorBase and passing
it to the initialize_autograph function.
Python code which doesn't operate on user-defined objects remains functionally
unchanged, but keep in mind that AutoGraph only executes such code at trace
time, and generally will not be consistent with eager execution.
For more information, see the
[AutoGraph reference documentation](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/
python/autograph/g3doc/reference/index.md),
and the [tf.function guide](https://www.tensorflow.org/guide/function#autograph_transformations).
"""
# TODO(mdan): Bring only the relevant symbols to the top level.
from nvidia.dali._autograph import operators
from nvidia.dali._autograph import utils
from nvidia.dali._autograph.core.converter import ConversionOptions
from nvidia.dali._autograph.core.converter import Feature
from nvidia.dali._autograph.impl.api import initialize_autograph
from nvidia.dali._autograph.impl.api import AutoGraphError
from nvidia.dali._autograph.impl.api import convert
from nvidia.dali._autograph.impl.api import converted_call
from nvidia.dali._autograph.impl.api import do_not_convert
# from nvidia.dali._autograph.impl.api import StackTraceMapper
from nvidia.dali._autograph.impl.api import to_code
from nvidia.dali._autograph.impl.api import to_graph
from nvidia.dali._autograph.lang.directives import set_element_type
from nvidia.dali._autograph.lang.directives import set_loop_options
from nvidia.dali._autograph.utils import ag_logging
from nvidia.dali._autograph.utils.all_utils import _remove_undocumented
from nvidia.dali._autograph.utils.hooks import OperatorBase
# TODO(mdan): Revisit this list once we finalize the generated code mechanism.
_allowed_symbols = [
# Main API
'AutoGraphError',
'ConversionOptions',
'Feature',
# 'StackTraceMapper',
'convert',
'converted_call',
'do_not_convert',
'to_code',
'to_graph',
# Overloaded operators
'operators',
# Python language "extensions"
'set_element_type',
'set_loop_options',
'stack',
'tensor_list',
# Utilities: to be removed
'utils',
]
_remove_undocumented(__name__, _allowed_symbols)
|
DALI-main
|
dali/python/nvidia/dali/_autograph/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core conversion logic, serves as main point of access."""
import functools
import inspect
import sys
import unittest
from nvidia.dali._autograph.core import config
from nvidia.dali._autograph.pyct import cache
from nvidia.dali._autograph.pyct import inspect_utils
from nvidia.dali._autograph.utils import ag_logging as logging
_ALLOWLIST_CACHE = cache.UnboundInstanceCache()
def _is_of_known_loaded_module(f, module_name):
mod = sys.modules.get(module_name, None)
if mod is None:
return False
if any(v is not None for v in mod.__dict__.values() if f is v):
return True
return False
def _is_known_loaded_type(f, module_name, entity_name):
"""Tests whether the function or method is an instance of a known type."""
if (module_name not in sys.modules or
not hasattr(sys.modules[module_name], entity_name)):
return False
type_entity = getattr(sys.modules[module_name], entity_name)
if isinstance(f, type_entity):
        # The method is of this type. Example:
#
# o = ClassType()
# function(o.method)()
return True
# Note: inspect is required here, to avoid unpacking tf.function decorators.
if inspect.ismethod(f):
        # The unbound method is of this type. Example:
#
# class ClassType:
# @function
# def method(self):
# ...
# o = ClassType()
# o.method()
if isinstance(f.__func__, type_entity):
return True
return False
def is_unsupported(o):
"""Checks whether an entity is supported by AutoGraph at all."""
# TODO(b/122265385): Remove this bypass.
if (_is_known_loaded_type(o, 'wrapt', 'FunctionWrapper') or
_is_known_loaded_type(o, 'wrapt', 'BoundFunctionWrapper')):
logging.warning(
'{} appears to be decorated by wrapt, which is not yet supported'
' by AutoGraph. The function will run as-is.'
' You may still apply AutoGraph before the wrapt decorator.'.format(o))
logging.log(2, 'Permanently allowed: %s: wrapt decorated', o)
return True
if _is_known_loaded_type(o, 'functools', '_lru_cache_wrapper'):
logging.log(2, 'Permanently allowed: %s: lru_cache', o)
return True
# Constructors are permanently allowed.
# TODO(mdan): Toggle as experimental feature instead.
# TODO(b/124016764): Remove this limitation.
if inspect_utils.isconstructor(o):
logging.log(2, 'Permanently allowed: %s: constructor', o)
return True
# Other built-in modules are permanently allowed.
# TODO(mdan): Figure out how to do this consistently for all stdlib modules.
if any(
_is_of_known_loaded_module(o, m)
for m in ('collections', 'pdb', 'copy', 'inspect', 're')):
logging.log(2, 'Permanently allowed: %s: part of builtin module', o)
return True
# Custom ops and kernels are also permanently allowed.
# See tensorflow.framework.load_library.
if (hasattr(o, '__module__') and
hasattr(o.__module__, '_IS_TENSORFLOW_PLUGIN')):
logging.log(2, 'Permanently allowed: %s: TensorFlow plugin', o)
return True
return False
# TODO(mdan): allow_namedtuple_subclass should be hardcoded to True.
def is_allowlisted(
o, check_call_override=True, allow_namedtuple_subclass=False):
"""Checks whether an entity is allowed for use in graph mode.
Examples of allowed entities include all members of the tensorflow
package.
Args:
o: A Python entity.
check_call_override: Reserved for internal use. When set to `False`, it
disables the rule according to which classes are allowed if their
__call__ method is allowed.
allow_namedtuple_subclass: Reserved for internal use. When `True`,
namedtuple subclasses are not allowed.
Returns:
Boolean
"""
# TODO(b/120224672): Fix this.
if isinstance(o, functools.partial):
# inspect.getmodule(functools.partial(...)) otherwise returns None since
# functools.partial objects do not have a __module__ attribute.
m = functools
else:
m = inspect.getmodule(o)
# Examples of callables that lack a __module__ property include builtins.
if hasattr(m, '__name__'):
for rule in config.CONVERSION_RULES:
action = rule.get_action(m)
if action == config.Action.CONVERT:
logging.log(2, 'Not allowed: %s: %s', o, rule)
return False
elif action == config.Action.DO_NOT_CONVERT:
logging.log(2, 'Allowlisted: %s: %s', o, rule)
return True
# The check for __code__ below is because isgeneratorfunction crashes
# without one.
if hasattr(o, '__code__') and inspect.isgeneratorfunction(o):
logging.log(2, 'Allowlisted: %s: generator functions are not converted', o)
return True
if (check_call_override and not inspect.isclass(o) and
hasattr(o, '__call__')):
# Callable objects: allowed if their __call__ method is.
# The type check avoids infinite recursion around the __call__ method
# of function objects.
if (type(o) != type(o.__call__)) and is_allowlisted(o.__call__): # pylint: disable=unidiomatic-typecheck # noqa: E721,E501
logging.log(2, 'Allowlisted: %s: object __call__ allowed', o)
return True
owner_class = None
if inspect.ismethod(o):
# Methods of allowed classes are also allowed, even if they are
# bound via user subclasses.
#
# For example, suppose `tf.Foo` has a method called `bar`, and `baz` is
# defined as below. `tf.Foo` is allowed. Then `baz.bar` is also
# allowed.
#
# class Custom(tf.Foo):
# pass
#
# baz = Custom()
#
# For the example above, if `Custom` did overload `bar`, then it would no
# longer be allowed.
owner_class = inspect_utils.getmethodclass(o)
if owner_class is not None:
if issubclass(owner_class, unittest.TestCase):
logging.log(2, 'Allowlisted: %s: method of TestCase subclass', o)
return True
owner_class = inspect_utils.getdefiningclass(o, owner_class)
if is_allowlisted(
owner_class,
check_call_override=False,
allow_namedtuple_subclass=True):
logging.log(2, 'Allowlisted: %s: owner is allowed %s', o,
owner_class)
return True
if inspect_utils.isnamedtuple(o):
# Due to the way they're constructed, namedtuple types cannot be converted
# because they don't expose source code. But we assume they are safe for
# graph mode since they are just containers.
if allow_namedtuple_subclass:
if not any(inspect_utils.isnamedtuple(base) for base in o.__bases__):
logging.log(2, 'Allowlisted: %s: named tuple', o)
return True
else:
logging.log(2, 'Allowlisted: %s: named tuple or subclass', o)
return True
logging.log(2, 'Not allowed: %s: default rule', o)
return False
def is_in_allowlist_cache(entity, options):
try:
return _ALLOWLIST_CACHE.has(entity, options)
except TypeError:
# Catch-all for entities that are unhashable or don't allow weakrefs.
return False
def cache_allowlisted(entity, options):
try:
_ALLOWLIST_CACHE[entity][options] = True
except TypeError:
# Catch-all for entities that are unhashable or don't allow weakrefs.
pass
|
DALI-main
|
dali/python/nvidia/dali/_autograph/impl/conversion.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/python/nvidia/dali/_autograph/impl/__init__.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module contains the user- and codegen-facing API for AutoGraph."""
import functools
import importlib
import inspect
import os
import sys
import textwrap
import traceback
from nvidia.dali._autograph import operators
from nvidia.dali._autograph import utils
from nvidia.dali._autograph.converters import asserts
from nvidia.dali._autograph.converters import break_statements
from nvidia.dali._autograph.converters import call_trees
from nvidia.dali._autograph.converters import conditional_expressions
from nvidia.dali._autograph.converters import continue_statements
from nvidia.dali._autograph.converters import control_flow
from nvidia.dali._autograph.converters import directives
from nvidia.dali._autograph.converters import functions
from nvidia.dali._autograph.converters import lists
from nvidia.dali._autograph.converters import logical_expressions
from nvidia.dali._autograph.converters import return_statements
from nvidia.dali._autograph.converters import slices
from nvidia.dali._autograph.converters import variables
from nvidia.dali._autograph.core import ag_ctx
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.core import config
from nvidia.dali._autograph.core import function_wrappers
from nvidia.dali._autograph.core import unsupported_features_checker
from nvidia.dali._autograph.impl import conversion
from nvidia.dali._autograph.operators import py_builtins
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import cfg
from nvidia.dali._autograph.pyct import error_utils
from nvidia.dali._autograph.pyct import errors
from nvidia.dali._autograph.pyct import inspect_utils
from nvidia.dali._autograph.pyct import origin_info
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import transpiler
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis import reaching_definitions
from nvidia.dali._autograph.utils import hooks
from nvidia.dali._autograph.utils import ag_logging as logging
from nvidia.dali._autograph.utils import all_utils
# TODO(klecki): replace missing functionality
# from nvidia.dali._autograph.utils import tf_stack
from nvidia.dali._autograph.utils.all_utils import export_symbol
def is_autograph_strict_conversion_mode():
return int(os.environ.get('AUTOGRAPH_STRICT_CONVERSION', '0')) > 0
#
# Error handling
#
# TODO(mdan): Export this symbol.
class AutoGraphError(errors.PyCTError):
"""Base class for all AutoGraph exceptions."""
pass
class ConversionError(AutoGraphError):
"""Raised during the conversion process."""
pass
class StagingError(AutoGraphError):
"""Raised during the staging (i.e. Python execution) of converted code."""
pass
class _ErrorMetadata(error_utils.ErrorMetadataBase):
"""AutoGraph-specific error metadata. See base class."""
def create_exception(self, source_error):
preferred_type = type(source_error)
if preferred_type in (errors.PyCTError, AutoGraphError, ConversionError, StagingError):
return preferred_type(self.get_message())
exc = super(_ErrorMetadata, self).create_exception(source_error)
if exc is not None:
return exc
# Note: While changing an error's message property to change the message it
# displays will probably work a lot of times, there is no standard way in
# Python to do that. The safest way is therefore to create a new exception.
# For user defined exceptions, we could define an interface that allowed
# them to work under this mechanism.
return StagingError(self.get_message())
def _attach_error_metadata(e, f):
"""Augments an error with the metadata necessary for rewrite."""
if hasattr(e, 'ag_pass_through'):
return
metadata = getattr(e, 'ag_error_metadata', None)
source_map = f.ag_source_map
if metadata is None:
logging.log(1, 'Caught error in user callable %s', f, exc_info=True)
message = '{}: {}'.format(e.__class__.__name__, e)
else:
message = None
cause_tb = traceback.extract_tb(sys.exc_info()[2])[1:]
e.ag_error_metadata = _ErrorMetadata(cause_tb, metadata, message, source_map,
__file__)
# class StackTraceMapper(tf_stack.StackTraceMapper):
# """Remaps generated code to code it originated from."""
# def __init__(self, converted_fn):
# super().__init__()
# self._source_map = converted_fn.ag_source_map
# # This may be called repeatedly: once on entry, by the superclass, then by
# # each child context manager.
# self._cached_map = None
# def get_effective_source_map(self):
# if self._cached_map is not None:
# return self._cached_map
# parent_map = self.parent.get_effective_source_map()
# effective_source_map = {}
# for loc, origin in self._source_map.items():
# effective_source_map[(loc.filename, loc.lineno)] = (origin.loc.filename,
# origin.loc.lineno,
# origin.function_name)
# for key, value in parent_map.items():
# filename, lineno, _ = value
# value_loc = origin_info.LineLocation(filename=filename, lineno=lineno)
# if value_loc in self._source_map:
# origin = self._source_map[value_loc]
# effective_source_map[key] = (origin.loc.filename, origin.loc.lineno,
# origin.function_name)
# else:
# effective_source_map[key] = value
# self._cached_map = effective_source_map
# return effective_source_map
#
# Actual source code transformation
#
class PyToLib(transpiler.PyToPy):
"""The TensorFlow AutoGraph transformer."""
def __init__(self, name, operator_overload):
super(PyToLib, self).__init__()
self._name = name
self._operator_overload = operator_overload
self._extra_locals = None
def get_transformed_name(self, node):
return self._name + "__" + super(PyToLib, self).get_transformed_name(node)
def get_extra_locals(self):
if self._extra_locals is None:
# TODO(mdan): Move into core or replace with an actual importable module.
# Craft a module that exposes the external API as well as certain
# internal modules.
module_spec = importlib.machinery.ModuleSpec(self._name, None)
ag_internal = importlib.util.module_from_spec(module_spec)
ag_internal.__dict__.update(inspect.getmodule(PyToLib).__dict__)
ag_internal.ConversionOptions = converter.ConversionOptions
ag_internal.STD = converter.STANDARD_OPTIONS
ag_internal.Feature = converter.Feature
ag_internal.utils = utils
ag_internal.FunctionScope = function_wrappers.FunctionScope
ag_internal.with_function_scope = function_wrappers.with_function_scope
# TODO(mdan): Add safeguards against name clashes.
# We don't want to create a submodule because we want the operators to be
# accessible as ag__.<operator>
ag_internal.__dict__.update(operators.__dict__)
ag_internal.hooks = hooks
ag_internal.hooks._DISPATCH = self._operator_overload
self._extra_locals = {'ag__': ag_internal}
return self._extra_locals
def get_caching_key(self, ctx):
return ctx.options
def initial_analysis(self, node, ctx):
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, ctx, None)
node = reaching_definitions.resolve(node, ctx, graphs)
anno.dup(
node,
{
anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
},
)
return node
def transform_ast(self, node, ctx):
unsupported_features_checker.verify(node)
node = self.initial_analysis(node, ctx)
node = functions.transform(node, ctx)
node = directives.transform(node, ctx)
node = break_statements.transform(node, ctx)
if ctx.user.options.uses(converter.Feature.ASSERT_STATEMENTS):
node = asserts.transform(node, ctx)
# Note: sequencing continue canonicalization before for loop one avoids
# dealing with the extra loop increment operation that the for
# canonicalization creates.
node = continue_statements.transform(node, ctx)
node = return_statements.transform(node, ctx)
if ctx.user.options.uses(converter.Feature.LISTS):
node = lists.transform(node, ctx)
node = slices.transform(node, ctx)
node = call_trees.transform(node, ctx)
node = control_flow.transform(node, ctx)
node = conditional_expressions.transform(node, ctx)
node = logical_expressions.transform(node, ctx)
node = variables.transform(node, ctx)
return node
def _convert_actual(entity, program_ctx):
"""Applies AutoGraph to entity."""
# TODO(mdan): Put these extra fields inside __autograph_info__.
if not hasattr(entity, '__code__'):
raise ValueError('Cannot apply autograph to a function that doesn\'t '
'expose a __code__ object.')
transformed, module, source_map = _TRANSPILER.transform(entity, program_ctx)
assert not hasattr(transformed, 'ag_module')
assert not hasattr(transformed, 'ag_source_map')
transformed.ag_module = module
transformed.ag_source_map = source_map
return transformed
#
# Generated code support
#
def autograph_artifact(entity, extras=None):
if inspect.ismethod(entity):
setattr(entity.__func__, 'autograph_info__', extras)
else:
setattr(entity, 'autograph_info__', extras)
return entity
def is_autograph_artifact(entity):
return hasattr(entity, 'autograph_info__')
def converted_call(f, args, kwargs, caller_fn_scope=None, options=None):
"""Converts a function call inline.
For internal use only.
Note: The argument list is optimized for readability of generated code, which
may look like this:
ag__.converted_call(f, (arg1, arg2), None, fscope)
ag__.converted_call(f, (), dict(arg1=val1, **kwargs), fscope)
ag__.converted_call(f, (arg1, arg2) + varargs, dict(**kwargs), lscope)
Args:
f: The function to convert.
args: Tuple, the original positional arguments of f
kwargs: Optional[Dict], the original keyword arguments of f
caller_fn_scope: Optional[function_wrappers.FunctionScope], the function
scope of the converted function in which this call was originally made.
options: Optional[converter.ConversionOptions], conversion options. If not
specified, the value of caller_fn_scope.callopts is used. Either options
or caller_fn_scope must be present.
Returns:
Any, the result of executing a possibly-converted `f` with the given
arguments.
"""
logging.log(1, 'Converted call: %s\n args: %s\n kwargs: %s\n', f, args,
kwargs)
if options is None:
if caller_fn_scope is None:
raise ValueError('either caller_fn_scope or options must have a value')
options = caller_fn_scope.callopts
if conversion.is_in_allowlist_cache(f, options):
logging.log(2, 'Allowlisted %s: from cache', f)
return _call_unconverted(f, args, kwargs, options, False)
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
logging.log(2, 'Allowlisted: %s: AutoGraph is disabled in context', f)
return _call_unconverted(f, args, kwargs, options, False)
if is_autograph_artifact(f):
logging.log(2, 'Permanently allowed: %s: AutoGraph artifact', f)
return _call_unconverted(f, args, kwargs, options)
# If this is a partial, unwrap it and redo all the checks.
if isinstance(f, functools.partial):
new_kwargs = {}
if f.keywords is not None:
# Use copy to avoid mutating the underlying keywords.
new_kwargs = f.keywords.copy()
if kwargs is not None:
new_kwargs.update(kwargs)
new_args = f.args + args
logging.log(3, 'Forwarding call of partial %s with\n%s\n%s\n', f, new_args,
new_kwargs)
return converted_call(
f.func,
new_args,
new_kwargs,
caller_fn_scope=caller_fn_scope,
options=options)
if inspect_utils.isbuiltin(f):
if f is eval:
return py_builtins.eval_in_original_context(f, args, caller_fn_scope)
if f is super:
return py_builtins.super_in_original_context(f, args, caller_fn_scope)
if f is globals:
return py_builtins.globals_in_original_context(caller_fn_scope)
if f is locals:
return py_builtins.locals_in_original_context(caller_fn_scope)
if kwargs:
return py_builtins.overload_of(f)(*args, **kwargs)
else:
return py_builtins.overload_of(f)(*args)
if conversion.is_unsupported(f):
return _call_unconverted(f, args, kwargs, options)
if not options.user_requested and conversion.is_allowlisted(f):
return _call_unconverted(f, args, kwargs, options)
# internal_convert_user_code is for example turned off when issuing a dynamic
# call conversion from generated code while in nonrecursive mode. In that
# case we evidently don't want to recurse, but we still have to convert
# things like builtins.
if not options.internal_convert_user_code:
return _call_unconverted(f, args, kwargs, options)
try:
if inspect.ismethod(f) or inspect.isfunction(f):
target_entity = f
effective_args = args
f_self = getattr(f, '__self__', None)
if f_self is not None:
effective_args = (f_self,) + effective_args
elif hasattr(f, '__class__') and hasattr(f.__class__, '__call__'):
# Callable objects. Dunder methods have special lookup rules, see:
# https://docs.python.org/3/reference/datamodel.html#specialnames
# TODO(mdan): Recurse into converted_call to simplify other verifications.
# This should be handled in the same way as partials.
target_entity = f.__class__.__call__
effective_args = (f,) + args
else:
target_entity = f
raise NotImplementedError('unknown callable type "%s"' % type(f))
except Exception as e: # pylint:disable=broad-except
logging.log(1, 'Error transforming entity %s', target_entity, exc_info=True)
if is_autograph_strict_conversion_mode():
raise
return _fall_back_unconverted(f, args, kwargs, options, e)
if not hasattr(target_entity, '__code__'):
logging.log(2, 'Permanently allowed: %s: native binding', target_entity)
return _call_unconverted(f, args, kwargs, options)
elif (hasattr(target_entity.__code__, 'co_filename') and
target_entity.__code__.co_filename == '<string>'):
# TODO(mdan): __globals__['txt'] might work in Py3.
logging.log(2, 'Permanently allowed: %s: dynamic code (exec?)',
target_entity)
return _call_unconverted(f, args, kwargs, options)
try:
program_ctx = converter.ProgramContext(options=options)
converted_f = _convert_actual(target_entity, program_ctx)
if logging.has_verbosity(2):
_log_callargs(converted_f, effective_args, kwargs)
except Exception as e: # pylint:disable=broad-except
logging.log(1, 'Error transforming entity %s', target_entity, exc_info=True)
if is_autograph_strict_conversion_mode():
raise
return _fall_back_unconverted(f, args, kwargs, options, e)
# TODO(klecki): Revert the stack trace mapping functionality
# with StackTraceMapper(converted_f), tf_stack.CurrentModuleFilter():
try:
if kwargs is not None:
result = converted_f(*effective_args, **kwargs)
else:
result = converted_f(*effective_args)
except Exception as e:
_attach_error_metadata(e, converted_f)
raise
return result
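# Editorial sketch (not part of the original module): generated code passes a
# function scope to `converted_call`; user code can approximate that with
# explicit options. `add` and `opts` are hypothetical names.
def _demo_converted_call():
  def add(a, b):
    return a + b
  opts = converter.ConversionOptions(recursive=False, user_requested=False)
  # `add` is converted (or, if conversion fails, executed as-is via the
  # unconverted fallback path above).
  return converted_call(add, (1, 2), None, options=opts)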
def _call_unconverted(f, args, kwargs, options, update_cache=True):
"""Calls the original function without converting with AutoGraph."""
if update_cache:
conversion.cache_allowlisted(f, options)
if kwargs is not None:
return f(*args, **kwargs)
return f(*args)
def _fall_back_unconverted(f, args, kwargs, options, exc):
"""Falls back to calling the function unconverted, in case of error."""
# TODO(mdan): Consider adding an internal metric.
warning_template = (
'AutoGraph could not transform %s and will run it as-is.\n'
'%s'
'Cause: %s\n')
# TODO(klecki): Expose the do_not_convert in DALI
# 'To silence this warning, decorate the function with'
# ' @tf.autograph.experimental.do_not_convert')
if isinstance(exc, errors.InaccessibleSourceCodeError):
if ag_ctx.INSPECT_SOURCE_SUPPORTED:
logging.warning(warning_template, f, '', exc)
elif isinstance(exc, errors.UnsupportedLanguageElementError):
if not conversion.is_in_allowlist_cache(f, options):
logging.warning(warning_template, f, '', exc)
else:
# TODO(klecki): Do we want to report such errors?
# file_bug_message = (
# 'Please report this to the TensorFlow team. When filing the bug, set'
# ' the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and'
# ' attach the full output.\n')
logging.warning(warning_template, f, '', exc)
return _call_unconverted(f, args, kwargs, options)
#
# TensorFlow integration
#
@export_symbol('__internal__.autograph.tf_convert', v1=[])
def tf_convert(f, ctx, convert_by_default=True, user_requested=False):
"""Decorator that applies AutoGraph to a function.
Use in internal APIs.
This API is suitable for high order functions internal to the TensorFlow API,
and more generally any function to which AutoGraph is not applied.
Guidance: `convert` was a decorator meant for use directly by developers, but
most of today's uses go through `tf.function`. `tf_convert` is to be called
from high order functions internal to TF. By default, all the internal
TensorFlow functions are skipped when AutoGraph processes the code. This may
lead to user-supplied functions being incorrectly skipped as well.
`tf_convert` helps avoid that. See the following example for more details.
```
=====tf_internal_module.py=====
def unconverted(input_fn):
return input_fn()
def converted(input_fn):
return tf.__internal__.autograph.tf_convert(
input_fn, ctx=tf.__internal__.autograph.control_status_ctx())()
======user_module.py======
  @tf.function
  def foo(input_fn):
    return unconverted(input_fn)
  @tf.function
  def bar(input_fn):
    return converted(input_fn)
  @tf.function(autograph=False)
  def baz(input_fn):
    return converted(input_fn)
```
The `foo` method above will execute the `input_fn` without autograph
conversion, while the `bar` method will run an autographed `input_fn`. The
`baz` method will run an unconverted `input_fn`, since `tf_convert` respects
the control status context.
Note that both methods in `tf_internal_module` are skipped by autograph when
tracing the `tf.function`. The configuration of whether a module/package
should be skipped by autograph is controlled in
tensorflow/python/autograph/core/config.py.
Args:
f: Callable.
ctx: ag_ctx.ControlStatusCtx, the Autograph context in which `f` is used.
convert_by_default: bool, whether to use AutoGraph when the context doesn't
specify.
user_requested: bool, whether to ignore the conversion allowlist. See
ConversionOptions.user_requested.
Returns:
Either `f` or the converted version of `f`.
"""
if is_autograph_artifact(f):
return f
# TODO(mdan): Grab features from context.
# Note: we pass the original context through to convert to properly handle the
# following scenario, which can be used inside TF implementations:
#
# ctx = ag_ctx.control_status_ctx()
# @function(autograph=False) # Low-level graph code
# def inner_fn():
# The context is disabled here, but should be enabled in user_fn
# tf_convert(user_fn, ctx=ctx)
if ctx.status == ag_ctx.Status.ENABLED:
wrapper_factory = convert(
recursive=True, user_requested=user_requested, conversion_ctx=ctx)
elif ctx.status == ag_ctx.Status.DISABLED:
wrapper_factory = do_not_convert
elif ctx.status == ag_ctx.Status.UNSPECIFIED:
if convert_by_default:
wrapper_factory = convert(
recursive=True, user_requested=user_requested, conversion_ctx=ctx)
else:
wrapper_factory = call_with_unspecified_conversion_status
else:
assert False, 'This switch contains all possible cases!'
wrapper = wrapper_factory(f)
return autograph_artifact(wrapper)
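# Editorial sketch (not part of the original module): how an internal
# higher-order function would wrap a user callback according to the ambient
# control-status context. `user_fn` is a hypothetical name.
def _demo_tf_convert():
  def user_fn():
    return 1
  wrapped = tf_convert(user_fn, ag_ctx.control_status_ctx())
  return wrapped()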
def call_with_unspecified_conversion_status(func):
"""Decorator that resets the conversion context to the unspecified status."""
def wrapper(*args, **kwargs):
with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.UNSPECIFIED):
return func(*args, **kwargs)
if inspect.isfunction(func) or inspect.ismethod(func):
wrapper = functools.update_wrapper(wrapper, func)
return autograph_artifact(wrapper)
def _log_callargs(f, args, kwargs):
"""Logging helper."""
logging.log(2, 'Defaults of %s : %s', f, f.__defaults__)
logging.log(2, 'KW defaults of %s : %s', f, f.__kwdefaults__)
if kwargs is not None:
callargs = inspect.getcallargs(f, *args, **kwargs)
else:
callargs = inspect.getcallargs(f, *args)
formatted_callargs = '\n'.join(
' {}: {}'.format(k, v) for k, v in callargs.items())
logging.log(2, 'Calling %s with\n%s\n', f, formatted_callargs)
#
# Public API
#
@export_symbol('autograph.experimental.do_not_convert')
def do_not_convert(func=None):
"""Decorator that suppresses the conversion of a function.
Args:
func: function to decorate.
Returns:
If `func` is not None, returns a `Callable` which is equivalent to
`func`, but is not converted by AutoGraph.
If `func` is None, returns a decorator that, when invoked with a
single `func` argument, returns a `Callable` equivalent to the
above case.
"""
if func is None:
return do_not_convert
def wrapper(*args, **kwargs):
with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
return func(*args, **kwargs)
if inspect.isfunction(func) or inspect.ismethod(func):
wrapper = functools.update_wrapper(wrapper, func)
return autograph_artifact(wrapper)
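# Editorial sketch (not part of the original module): a helper excluded from
# conversion still runs normally; AutoGraph is merely disabled inside it.
# `plain_sum` is a hypothetical name.
def _demo_do_not_convert():
  @do_not_convert
  def plain_sum(values):
    total = 0
    for v in values:
      total += v
    return total
  return plain_sum([1, 2, 3])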
# TODO(mdan): Make private.
def convert(recursive=False,
optional_features=None,
user_requested=True,
conversion_ctx=ag_ctx.NullCtx()):
"""Decorator that compiles a function to use TensorFlow ops.
The decorator is dynamic - it recompiles the target whenever the decorated
function is called. This means the parameter values are known at conversion.
It also means that repeated calls with different types of parameters will be
correctly processed.
Args:
recursive: bool, whether to recursively convert any functions or classes
that the converted function may use.
optional_features: converter.Feature, allows toggling optional or
experimental features. When set to None, only the core features are
enabled.
user_requested: bool, whether this is a function that the user explicitly
asked to be converted. See ConversionOptions.user_requested.
conversion_ctx: Optional ag_ctx.ControlStatusCtx, the Autograph context in
which `f` is used.
Returns:
Callable, a decorator that converts the given function into an equivalent
function that uses TensorFlow ops.
"""
def decorator(f):
"""Decorator implementation."""
def wrapper(*args, **kwargs):
"""Wrapper that calls the converted version of f."""
options = converter.ConversionOptions(
recursive=recursive,
user_requested=user_requested,
optional_features=optional_features)
try:
with conversion_ctx:
return converted_call(f, args, kwargs, options=options)
except Exception as e: # pylint:disable=broad-except
if hasattr(e, 'ag_error_metadata'):
raise e.ag_error_metadata.to_exception(e)
else:
raise
if inspect.isfunction(f) or inspect.ismethod(f):
wrapper = functools.update_wrapper(wrapper, f)
decorated_wrapper = all_utils.make_decorator(f, wrapper)
return autograph_artifact(decorated_wrapper)
return decorator
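# Editorial sketch (not part of the original module): explicitly requested
# conversion; the wrapper re-converts `square_or_negate` (a hypothetical
# function) on every call.
def _demo_convert():
  @convert(recursive=True)
  def square_or_negate(x):
    if x > 0:
      return x * x
    return -x
  return square_or_negate(2)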
# pylint:disable=line-too-long
@export_symbol('autograph.to_graph', v1=[])
def to_graph(entity, recursive=True, experimental_optional_features=None):
"""Converts a Python entity into a TensorFlow graph.
Also see: `tf.autograph.to_code`, `tf.function`.
Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
Python code to TensorFlow graph code. It does not implement any caching,
variable management or create any actual ops, and is best used where greater
control over the generated TensorFlow graph is desired. Another difference
from `tf.function` is that `to_graph` will not wrap the graph into a
TensorFlow function or a Python callable. Internally, `tf.function` uses
`to_graph`.
Example usage:
>>> def f(x):
... if x > 0:
... y = x * x
... else:
... y = -x
... return y
...
>>> converted_f = to_graph(f)
>>> x = tf.constant(2)
>>> converted_f(x) # converted_f is like a TensorFlow Op.
<tf.Tensor: shape=(), dtype=int32, numpy=4>
Supported Python entities include:
* functions
* classes
* object methods
Functions are converted into new functions with converted code.
Classes are converted by generating a new class whose methods use converted
code.
Methods are converted into unbound functions that have an additional first
argument called `self`.
For a tutorial, see the
[tf.function and AutoGraph guide](https://www.tensorflow.org/guide/function).
For more detailed information, see the
[AutoGraph reference documentation](https://github.com/tensorflow/tensorflow/blob/master/
tensorflow/python/autograph/g3doc/reference/index.md).
Args:
entity: Python callable or class to convert.
recursive: Whether to recursively convert any functions that the converted
function may call.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value.
Returns:
Same as `entity`, the converted Python function or class.
Raises:
ValueError: If the entity could not be converted.
"""
try:
program_ctx = converter.ProgramContext(
options=converter.ConversionOptions(
recursive=recursive,
user_requested=True,
optional_features=experimental_optional_features))
return autograph_artifact(_convert_actual(entity, program_ctx))
except (ValueError, AttributeError, KeyError, NameError, AssertionError) as e:
logging.error(1, 'Error converting %s', entity, exc_info=True)
raise ConversionError('converting {}: {}: {}'.format(
entity, e.__class__.__name__, str(e)))
@export_symbol(v1=['autograph.to_graph'])
def to_graph_v1(entity,
recursive=True,
arg_values=None,
arg_types=None,
experimental_optional_features=None):
"""Converts a Python entity into a TensorFlow graph.
Also see: `tf.autograph.to_code`, `tf.function`.
Unlike `tf.function`, `to_graph` is a low-level transpiler that converts
Python code to TensorFlow graph code. It does not implement any caching,
variable management or create any actual ops, and is best used where greater
control over the generated TensorFlow graph is desired. Another difference
from `tf.function` is that `to_graph` will not wrap the graph into a
TensorFlow function or a Python callable. Internally, `tf.function` uses
`to_graph`.
_Example Usage_
```python
def foo(x):
if x > 0:
y = x * x
else:
y = -x
return y
converted_foo = to_graph(foo)
x = tf.constant(1)
y = converted_foo(x) # converted_foo is a TensorFlow Op-like.
assert is_tensor(y)
```
Supported Python entities include:
* functions
* classes
* object methods
Functions are converted into new functions with converted code.
Classes are converted by generating a new class whose methods use converted
code.
Methods are converted into unbound functions that have an additional first
argument called `self`.
Args:
entity: Python callable or class to convert.
recursive: Whether to recursively convert any functions that the converted
function may call.
arg_values: Deprecated.
arg_types: Deprecated.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value.
Returns:
Same as `entity`, the converted Python function or class.
Raises:
ValueError: If the entity could not be converted.
"""
del arg_types
del arg_values
return to_graph(
entity,
recursive=recursive,
experimental_optional_features=experimental_optional_features)
@export_symbol(v1=['autograph.to_code'])
def to_code_v1(entity,
recursive=True,
arg_values=None,
arg_types=None,
indentation=' ',
experimental_optional_features=None):
"""Returns the source code generated by AutoGraph, as a string.
Example usage:
>>> def f(x):
... if x < 0:
... x = -x
... return x
>>> tf.autograph.to_code(f)
"...def tf__f(x):..."
Also see: `tf.autograph.to_graph`.
Note: If a function has been decorated with `tf.function`, pass its
underlying Python function, rather than the callable that `tf.function`
creates:
>>> @tf.function
... def f(x):
... if x < 0:
... x = -x
... return x
>>> tf.autograph.to_code(f.python_function)
"...def tf__f(x):..."
Args:
entity: Python callable or class.
recursive: Whether to recursively convert any functions that the converted
function may call.
arg_values: Deprecated.
arg_types: Deprecated.
indentation: Deprecated.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value.
Returns:
The converted code as string.
"""
del arg_values
del arg_types
del indentation
return to_code(
entity,
recursive=recursive,
experimental_optional_features=experimental_optional_features)
@export_symbol('autograph.to_code', v1=[])
def to_code(entity, recursive=True, experimental_optional_features=None):
"""Returns the source code generated by AutoGraph, as a string.
Example usage:
>>> def f(x):
... if x < 0:
... x = -x
... return x
>>> tf.autograph.to_code(f)
"...def tf__f(x):..."
Also see: `tf.autograph.to_graph`.
Note: If a function has been decorated with `tf.function`, pass its
underlying Python function, rather than the callable that `tf.function`
creates:
>>> @tf.function
... def f(x):
... if x < 0:
... x = -x
... return x
>>> tf.autograph.to_code(f.python_function)
"...def tf__f(x):..."
Args:
entity: Python callable or class to convert.
recursive: Whether to recursively convert any functions that the converted
function may call.
experimental_optional_features: `None`, a tuple of, or a single
`tf.autograph.experimental.Feature` value.
Returns:
The converted code as string.
"""
source = inspect.getsource(
to_graph(
entity,
recursive=recursive,
experimental_optional_features=experimental_optional_features))
return textwrap.dedent(source)
_TRANSPILER = None
def initialize_autograph(operator_overload=hooks.OperatorBase(),
converter_name="autograph",
convert_modules=[],
do_not_convert_modules=["nvidia.dali._autograph"]):
"""Initialize the AutoGraph with custom operator overloads.
Parameters
----------
operator_overload : subclass of autograph.OperatorBase(), optional
Customization point for detection of user-defined objects that trigger
the user-defined overload to be called by AutoGraph instead of falling
back to regular Python semantics, by default autograph.OperatorBase().
converter_name : str, optional
Name that is used to generate converted function names and as a fake module under which
AutoGraph is inserted into them, by default "autograph".
convert_modules : list, optional
Provides a way to include extra modules that should be converted by AutoGraph.
In particular, the modules specified here take precedence over `do_not_convert_modules`,
so that some submodules of otherwise excluded modules can still be converted.
do_not_convert_modules : list, optional
AutoGraph needs to filter out the modules that should not be converted; by default it
only filters out its own functions. This parameter provides the list of modules that
should be ignored. If AutoGraph is used under a different name (for example, included
in the source as some_library._ag), it should be adjusted accordingly,
by default ["nvidia.dali._autograph"].
"""
global _TRANSPILER
if _TRANSPILER is not None:
raise RuntimeError("AutoGraph already initialized")
_TRANSPILER = PyToLib(converter_name, operator_overload)
convert_rules = tuple(config.Convert(name) for name in convert_modules)
# Add the name of the initialized library to the known libraries to stop recursive conversion
do_not_convert_rules = tuple(config.DoNotConvert(name) for name in do_not_convert_modules)
config.CONVERSION_RULES = ((config.DoNotConvert(converter_name),) + convert_rules +
do_not_convert_rules + config.CONVERSION_RULES)
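# Editorial sketch (not part of the original module): a library embedding this
# AutoGraph fork initializes it once with its own overloads and module filters.
# `_MyOverloads`, "my_ag", and "my_library" are hypothetical names.
def _demo_initialize_autograph():
  class _MyOverloads(hooks.OperatorBase):
    pass
  initialize_autograph(operator_overload=_MyOverloads(),
                       converter_name="my_ag",
                       do_not_convert_modules=["my_library._ag"])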
|
DALI-main
|
dali/python/nvidia/dali/_autograph/impl/api.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checkers for detecting unsupported Python features."""
import gast
from nvidia.dali._autograph.pyct import errors
class UnsupportedFeaturesChecker(gast.NodeVisitor):
"""Quick check for Python features we know we don't support.
Any features detected will cause AutoGraph to not compile a function.
"""
def visit_Attribute(self, node):
if (node.attr is not None
and node.attr.startswith('__') and not node.attr.endswith('__')):
raise errors.UnsupportedLanguageElementError(
'mangled names are not yet supported')
self.generic_visit(node)
def visit_For(self, node):
if node.orelse:
raise errors.UnsupportedLanguageElementError(
'for/else statement not yet supported')
self.generic_visit(node)
def visit_While(self, node):
if node.orelse:
raise errors.UnsupportedLanguageElementError(
'while/else statement not yet supported')
self.generic_visit(node)
# These checks could potentially be replaced with inspect.isgeneratorfunction
# to avoid a getsource/parse/ast-walk round trip.
def visit_Yield(self, node):
raise errors.UnsupportedLanguageElementError('generators are not supported')
def visit_YieldFrom(self, node):
raise errors.UnsupportedLanguageElementError('generators are not supported')
def verify(node):
UnsupportedFeaturesChecker().visit(node)
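# Editorial sketch (not part of the original module): parse a snippet with
# gast and verify it; a generator triggers the unsupported-feature error.
def _demo_verify():
  node = gast.parse("def gen():\n    yield 1\n")
  try:
    verify(node)
  except errors.UnsupportedLanguageElementError:
    return True
  return False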
|
DALI-main
|
dali/python/nvidia/dali/_autograph/core/unsupported_features_checker.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global configuration."""
from nvidia.dali._autograph.core import config_lib
Action = config_lib.Action
Convert = config_lib.Convert
DoNotConvert = config_lib.DoNotConvert
# This list is evaluated in order and stops at the first rule that tests True
# for a definitely_convert or definitely_bypass call.
# The name under which AutoGraph is used is registered in initialize_autograph.
CONVERSION_RULES = (
# Known packages
# <List of packages that we force-convert>
# Builtin modules
DoNotConvert('collections'),
DoNotConvert('copy'),
DoNotConvert('cProfile'),
DoNotConvert('inspect'),
DoNotConvert('ipdb'),
DoNotConvert('linecache'),
DoNotConvert('mock'),
DoNotConvert('pathlib'),
DoNotConvert('pdb'),
DoNotConvert('posixpath'),
DoNotConvert('pstats'),
DoNotConvert('re'),
DoNotConvert('threading'),
DoNotConvert('urllib'),
# Known libraries
DoNotConvert('matplotlib'),
DoNotConvert('numpy'),
DoNotConvert('pandas'),
DoNotConvert('PIL'),
DoNotConvert('absl.logging'),
)
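# Editorial sketch (not part of the original module): a plausible way the rule
# list above could be consulted - each rule maps a module to an Action, and the
# first non-NONE answer wins.
def _demo_rule_lookup(module):
  for rule in CONVERSION_RULES:
    action = rule.get_action(module)
    if action != Action.NONE:
      return action
  return Action.NONE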
|
DALI-main
|
dali/python/nvidia/dali/_autograph/core/config.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global configuration support."""
import enum
# TODO(mdan): For better performance, allow each rule to take a set of names.
class Rule(object):
"""Base class for conversion rules."""
def __init__(self, module_prefix):
self._prefix = module_prefix
def matches(self, module_name):
return (module_name.startswith(self._prefix + '.') or
module_name == self._prefix)
class Action(enum.Enum):
NONE = 0
CONVERT = 1
DO_NOT_CONVERT = 2
class DoNotConvert(Rule):
"""Indicates that this module should be not converted."""
def __str__(self):
return 'DoNotConvert rule for {}'.format(self._prefix)
def get_action(self, module):
if self.matches(module.__name__):
return Action.DO_NOT_CONVERT
return Action.NONE
class Convert(Rule):
"""Indicates that this module should be converted."""
def __str__(self):
return 'Convert rule for {}'.format(self._prefix)
def get_action(self, module):
if self.matches(module.__name__):
return Action.CONVERT
return Action.NONE
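# Editorial sketch (not part of the original module): prefix matching is
# exact-or-submodule, so 'numpy' matches 'numpy.linalg' but not 'numpy_ext'.
def _demo_rule_matching():
  rule = DoNotConvert('numpy')
  assert rule.matches('numpy')
  assert rule.matches('numpy.linalg')
  assert not rule.matches('numpy_ext')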
|
DALI-main
|
dali/python/nvidia/dali/_autograph/core/config_lib.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for wrapping converted functions bodies with auxiliary logic."""
from nvidia.dali._autograph.core import ag_ctx
from nvidia.dali._autograph.core import converter
from nvidia.dali._autograph.operators import variables
# TODO(mdan): Move this into operators - it represents a function definition.
class FunctionScope(object):
"""Context manager that wraps the body of a converted function.
This context manager handles various operations related to the scope of a
function:
* optional TF name scopes - these name scopes match the name of the
function, for easy visualization in TensorBoard;
* optional automatic control dependencies - this adds the same mechanism
for control dependencies that is used by `@tf.function`; it can be
optionally enabled when using `tf.autograph.to_graph`;
* tracking of autograph conversion state (whether it's enabled by the user,
conversion options);
"""
def __init__(self, function_name, scope_name, options):
self.name = scope_name
self.options = options
if options.user_requested:
self.autograph_ctx = ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED,
options)
self.callopts = options.call_options()
def _sanitize(self, name):
"""See https://www.tensorflow.org/api_docs/python/tf/Graph#name_scope."""
# TensorFlow doesn't like leading underscores at the top level.
if name and name.startswith('_'):
name = 'fn' + name
return name
def __enter__(self):
if self.options.user_requested:
self.autograph_ctx.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.options.user_requested:
self.autograph_ctx.__exit__(exc_type, exc_val, exc_tb)
def ret(self, value, did_return):
"""Marks a value as returned from the function guarded by the scope."""
del did_return
if isinstance(value, variables.UndefinedReturnValue):
return None
return value
def with_function_scope(thunk, scope_name, options):
"""Inline version of the FunctionScope context manager."""
with FunctionScope('lambda_', scope_name, options) as scope:
return thunk(scope)
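# Editorial sketch (not part of the original module): converted function
# bodies are wrapped in a FunctionScope, conventionally named `fscope` in
# generated code. `user_fn` is a hypothetical function name.
def _demo_function_scope():
  options = converter.STANDARD_OPTIONS
  with FunctionScope('user_fn', 'fscope', options) as fscope:
    retval = fscope.ret(42, did_return=True)
  return retval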
|
DALI-main
|
dali/python/nvidia/dali/_autograph/core/function_wrappers.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
  <your converter>
    [extends] converter.Base
      [extends] transformer.Base
        [extends] gast.NodeTransformer
      [uses] transformer.SourceInfo
    [uses] converter.EntityContext
      [uses] converter.ProgramContext
      [uses] transformer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
import enum
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import ast_util
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import templates
from nvidia.dali._autograph.pyct import transformer
from nvidia.dali._autograph.utils.all_utils import export_symbol
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
@export_symbol('autograph.experimental.Feature')
class Feature(enum.Enum):
"""This enumeration represents optional conversion options.
These conversion options are experimental. They are subject to change without
notice and offer no guarantees.
_Example Usage_
```python
optionals = tf.autograph.experimental.Feature.EQUALITY_OPERATORS
@tf.function(experimental_autograph_options=optionals)
def f(i):
if i == 0: # EQUALITY_OPERATORS allows the use of == here.
tf.print('i is zero')
```
Attributes:
ALL: Enable all features.
ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert.
BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to
their TF counterparts.
EQUALITY_OPERATORS: Whether to convert the equality operator ('==') to
tf.math.equal.
LISTS: Convert list idioms, like initializers, slices, append, etc.
"""
ALL = 'ALL'
ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'
BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'
EQUALITY_OPERATORS = 'EQUALITY_OPERATORS'
LISTS = 'LISTS'
@classmethod
def all(cls):
"""Returns a tuple that enables all options."""
return tuple(cls.__members__.values())
@classmethod
def all_but(cls, exclude):
"""Returns a tuple that enables all but the excluded options."""
if not isinstance(exclude, (list, tuple, set)):
exclude = (exclude,)
return tuple(set(cls.all()) - set(exclude) - {cls.ALL})
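# Editorial sketch (not part of the original module): selecting every optional
# feature except list handling.
def _demo_feature_selection():
  selected = Feature.all_but(Feature.LISTS)
  assert Feature.LISTS not in selected
  assert Feature.ALL not in selected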
STANDARD_OPTIONS = None # Forward definition.
class ConversionOptions(object):
"""Immutable container for global conversion flags.
Attributes:
recursive: bool, whether to recursively convert any user functions or
classes that the converted function may use.
user_requested: bool, whether the conversion was explicitly requested by
the user, as opposed to being performed as a result of other logic. This
value always auto-resets to False in child conversions.
optional_features: Union[Feature, Set[Feature]], controls the use of
optional features in the conversion process. See Feature for available
options.
"""
def __init__(self,
recursive=False,
user_requested=False,
internal_convert_user_code=True,
optional_features=Feature.ALL):
self.recursive = recursive
self.user_requested = user_requested
# TODO(mdan): Rename to conversion_recursion_depth?
self.internal_convert_user_code = internal_convert_user_code
if optional_features is None:
optional_features = ()
elif isinstance(optional_features, Feature):
optional_features = (optional_features,)
optional_features = frozenset(optional_features)
self.optional_features = optional_features
def as_tuple(self):
return (self.recursive, self.user_requested,
self.internal_convert_user_code, self.optional_features)
def __hash__(self):
return hash(self.as_tuple())
def __eq__(self, other):
assert isinstance(other, ConversionOptions)
return self.as_tuple() == other.as_tuple()
def __str__(self):
return 'ConversionOptions[{}]'.format(self.as_tuple())
def uses(self, feature):
return (Feature.ALL in self.optional_features or
feature in self.optional_features)
def call_options(self):
"""Returns the corresponding options to be used for recursive conversion."""
return ConversionOptions(
recursive=self.recursive,
user_requested=False,
internal_convert_user_code=self.recursive,
optional_features=self.optional_features)
def to_ast(self):
"""Returns a representation of this object as an AST node.
The AST node encodes a constructor that would create an object with the
same contents.
Returns:
ast.Node
"""
if self == STANDARD_OPTIONS:
return parser.parse_expression('ag__.STD')
template = """
ag__.ConversionOptions(
recursive=recursive_val,
user_requested=user_requested_val,
optional_features=optional_features_val,
internal_convert_user_code=internal_convert_user_code_val)
"""
def list_of_features(values):
return parser.parse_expression('({})'.format(', '.join(
'ag__.{}'.format(str(v)) for v in values)))
expr_ast = templates.replace(
template,
recursive_val=parser.parse_expression(str(self.recursive)),
user_requested_val=parser.parse_expression(str(self.user_requested)),
internal_convert_user_code_val=parser.parse_expression(
str(self.internal_convert_user_code)),
optional_features_val=list_of_features(self.optional_features))
return expr_ast[0].value
STANDARD_OPTIONS = ConversionOptions(
recursive=True,
user_requested=False,
internal_convert_user_code=True,
optional_features=None)
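# Editorial sketch (not part of the original module): options are compared by
# value and hashable, and child conversions always reset `user_requested`.
def _demo_conversion_options():
  opts = ConversionOptions(recursive=True, user_requested=True)
  child = opts.call_options()
  assert not child.user_requested
  assert opts.uses(Feature.LISTS)  # default optional_features is Feature.ALL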
class ProgramContext(object):
"""ProgramContext keeps track of converting function hierarchies.
Attributes:
options: ConversionOptions
autograph_module: Deprecated. Do not use.
"""
def __init__(self, options, autograph_module=None):
self.options = options
self.autograph_module = autograph_module
class Base(transformer.Base):
"""All converters should inherit from this class.
Attributes:
ctx: EntityContext
"""
def __init__(self, ctx):
super(Base, self).__init__(ctx)
self._used = False
self._ast_depth = 0
def get_definition_directive(self, node, directive, arg, default):
"""Returns the unique directive argument for a symbol.
See lang/directives.py for details on directives.
Example:
# Given a directive in the code:
ag.foo_directive(bar, baz=1)
# One can write for an AST node Name(id='bar'):
get_definition_directive(node, ag.foo_directive, 'baz', default=None)
Args:
node: ast.AST, the node representing the symbol for which the directive
argument is needed.
directive: Callable[..., Any], the directive to search.
arg: str, the directive argument to return.
default: Any
Raises:
ValueError: if conflicting annotations have been found
"""
defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
if not defs:
return default
arg_values_found = []
for def_ in defs:
if (directive in def_.directives and arg in def_.directives[directive]):
arg_values_found.append(def_.directives[directive][arg])
if not arg_values_found:
return default
if len(arg_values_found) == 1:
return arg_values_found[0]
# If multiple annotations reach the symbol, they must all match. If they do,
# return any of them.
first_value = arg_values_found[0]
for other_value in arg_values_found[1:]:
if not ast_util.matches(first_value, other_value):
qn = anno.getanno(node, anno.Basic.QN)
raise ValueError(
'%s has ambiguous annotations for %s(%s): %s, %s' %
(qn, directive.__name__, arg, parser.unparse(other_value).strip(),
parser.unparse(first_value).strip()))
return first_value
def visit(self, node):
if not self._ast_depth:
if self._used:
raise ValueError('converter objects cannot be reused')
self._used = True
self._ast_depth += 1
try:
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
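# Editorial sketch (not part of the original module): a minimal converter
# subclass. Real converters also consult `self.ctx` for entity information;
# `_DemoRenameConverter` and the variable name 'tmp' are hypothetical.
class _DemoRenameConverter(Base):
  def visit_Name(self, node):
    node = self.generic_visit(node)
    if node.id == 'tmp':  # rename a hypothetical variable
      node.id = 'renamed_tmp'
    return node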
|
DALI-main
|
dali/python/nvidia/dali/_autograph/core/converter.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/python/nvidia/dali/_autograph/core/__init__.py
|