python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
|---|---|---|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for templates module."""
import re
import gast
import unittest
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import origin_info
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import transformer
class TransformerTest(unittest.TestCase):
    """Tests for transformer.Base: state tracking, error handling, origin info.

    NOTE(review): the extracted source had all indentation stripped; structure
    below is reconstructed from the assertions each test makes on the AST.
    """

    def _simple_context(self):
        """Build a minimal transformer.Context with no source information."""
        entity_info = transformer.EntityInfo(
            name='Test_fn',
            source_code=None,
            source_file=None,
            future_features=(),
            namespace=None)
        return transformer.Context(entity_info, None, None)

    def assertSameAnno(self, first, second, key):
        """Assert both nodes carry the identical annotation object for key."""
        self.assertIs(anno.getanno(first, key), anno.getanno(second, key))

    def assertDifferentAnno(self, first, second, key):
        """Assert the two nodes carry distinct annotation objects for key."""
        self.assertIsNot(anno.getanno(first, key), anno.getanno(second, key))

    def test_state_tracking(self):

        class LoopState(object):
            pass

        class CondState(object):
            pass

        class TestTransformer(transformer.Base):

            def visit(self, node):
                # Record the current state objects so the test can compare
                # which nodes share state and which got fresh state.
                anno.setanno(node, 'loop_state', self.state[LoopState].value)
                anno.setanno(node, 'cond_state', self.state[CondState].value)
                return super(TestTransformer, self).visit(node)

            def visit_While(self, node):
                self.state[LoopState].enter()
                node = self.generic_visit(node)
                self.state[LoopState].exit()
                return node

            def visit_If(self, node):
                self.state[CondState].enter()
                node = self.generic_visit(node)
                self.state[CondState].exit()
                return node

        tr = TestTransformer(self._simple_context())

        def test_function(a):
            a = 1
            while a:
                _ = 'a'
                if a > 2:
                    _ = 'b'
                    while True:
                        raise '1'
                if a > 3:
                    _ = 'c'
                    while True:
                        raise '1'

        node, _ = parser.parse_entity(test_function, future_features=())
        node = tr.visit(node)

        fn_body = node.body
        outer_while_body = fn_body[1].body
        # Entering the while changes loop state but not cond state.
        self.assertSameAnno(fn_body[0], outer_while_body[0], 'cond_state')
        self.assertDifferentAnno(fn_body[0], outer_while_body[0], 'loop_state')

        first_if_body = outer_while_body[1].body
        self.assertDifferentAnno(outer_while_body[0], first_if_body[0],
                                 'cond_state')
        self.assertSameAnno(outer_while_body[0], first_if_body[0], 'loop_state')

        first_inner_while_body = first_if_body[1].body
        self.assertSameAnno(first_if_body[0], first_inner_while_body[0],
                            'cond_state')
        self.assertDifferentAnno(first_if_body[0], first_inner_while_body[0],
                                 'loop_state')

        second_if_body = outer_while_body[2].body
        # Sibling ifs get distinct cond states but share the loop state.
        self.assertDifferentAnno(first_if_body[0], second_if_body[0],
                                 'cond_state')
        self.assertSameAnno(first_if_body[0], second_if_body[0], 'loop_state')

        second_inner_while_body = second_if_body[1].body
        self.assertDifferentAnno(first_inner_while_body[0],
                                 second_inner_while_body[0], 'cond_state')
        self.assertDifferentAnno(first_inner_while_body[0],
                                 second_inner_while_body[0], 'loop_state')

    def test_state_tracking_context_manager(self):

        class CondState(object):
            pass

        class TestTransformer(transformer.Base):

            def visit(self, node):
                anno.setanno(node, 'cond_state', self.state[CondState].value)
                return super(TestTransformer, self).visit(node)

            def visit_If(self, node):
                # Using the state as a context manager should be equivalent
                # to explicit enter()/exit() calls.
                with self.state[CondState]:
                    return self.generic_visit(node)

        tr = TestTransformer(self._simple_context())

        def test_function(a):
            a = 1
            if a > 2:
                _ = 'b'
                if a < 5:
                    _ = 'c'
                _ = 'd'

        node, _ = parser.parse_entity(test_function, future_features=())
        node = tr.visit(node)

        fn_body = node.body
        outer_if_body = fn_body[1].body
        self.assertDifferentAnno(fn_body[0], outer_if_body[0], 'cond_state')
        self.assertSameAnno(outer_if_body[0], outer_if_body[2], 'cond_state')

        inner_if_body = outer_if_body[1].body
        self.assertDifferentAnno(inner_if_body[0], outer_if_body[0],
                                 'cond_state')

    def test_visit_block_postprocessing(self):

        class TestTransformer(transformer.Base):

            def _process_body_item(self, node):
                # Wrap the `z = y` assignment into `if x: z = y` and splice
                # subsequent statements into the new if's body.
                if isinstance(node, gast.Assign) and (node.value.id == 'y'):
                    if_node = gast.If(
                        gast.Name(
                            'x', ctx=gast.Load(), annotation=None,
                            type_comment=None),
                        [node], [])
                    return if_node, if_node.body
                return node, None

            def visit_FunctionDef(self, node):
                node.body = self.visit_block(
                    node.body, after_visit=self._process_body_item)
                return node

        def test_function(x, y):
            z = x
            z = y
            return z

        tr = TestTransformer(self._simple_context())
        node, _ = parser.parse_entity(test_function, future_features=())
        node = tr.visit(node)

        self.assertEqual(len(node.body), 2)
        self.assertIsInstance(node.body[0], gast.Assign)
        self.assertIsInstance(node.body[1], gast.If)
        self.assertIsInstance(node.body[1].body[0], gast.Assign)
        self.assertIsInstance(node.body[1].body[1], gast.Return)

    def test_robust_error_on_list_visit(self):

        class BrokenTransformer(transformer.Base):

            def visit_If(self, node):
                # This is broken because visit expects a single node, not a
                # list, and the body of an if is a list.
                # Importantly, the default error handling in visit also
                # expects a single node. Therefore, mistakes like this need to
                # trigger a type error before the visit called here installs
                # its error handler.
                # That type error can then be caught by the enclosing call to
                # visit, and correctly blame the If node.
                self.visit(node.body)
                return node

        def test_function(x):
            if x > 0:
                return x

        tr = BrokenTransformer(self._simple_context())
        node, _ = parser.parse_entity(test_function, future_features=())
        with self.assertRaises(ValueError) as cm:
            node = tr.visit(node)
        obtained_message = str(cm.exception)
        expected_message = r'expected "ast.AST", got "\<(type|class) \'list\'\>"'
        self.assertRegex(obtained_message, expected_message)

    def test_robust_error_on_ast_corruption(self):
        # A child class should not be able to be so broken that it causes the
        # error handling in `transformer.Base` to raise an exception. Why not?
        # Because then the original error location is dropped, and an error
        # handler higher up in the call stack gives misleading information.
        # Here we test that the error handling in `visit` completes, and
        # blames the correct original exception, even if the AST gets
        # corrupted.

        class NotANode(object):
            pass

        class BrokenTransformer(transformer.Base):

            def visit_If(self, node):
                node.body = NotANode()
                raise ValueError('I blew up')

        def test_function(x):
            if x > 0:
                return x

        tr = BrokenTransformer(self._simple_context())
        node, _ = parser.parse_entity(test_function, future_features=())
        with self.assertRaises(ValueError) as cm:
            node = tr.visit(node)
        obtained_message = str(cm.exception)
        # The message should reference the exception actually raised, not
        # anything from the exception handler.
        expected_substring = 'I blew up'
        self.assertIn(expected_substring, obtained_message)

    def test_origin_info_propagated_to_new_nodes(self):

        class TestTransformer(transformer.Base):

            def visit_If(self, node):
                return gast.Pass()

        tr = TestTransformer(self._simple_context())

        def test_fn():
            x = 1
            if x > 0:
                x = 1
            return x

        node, source = parser.parse_entity(test_fn, future_features=())
        origin_info.resolve(node, source, 'test_file', 100, 0)
        node = tr.visit(node)

        created_pass_node = node.body[1]
        # Takes the line number of the if statement.
        self.assertEqual(
            anno.getanno(created_pass_node, anno.Basic.ORIGIN).loc.lineno, 102)

    def test_origin_info_preserved_in_moved_nodes(self):

        class TestTransformer(transformer.Base):

            def visit_If(self, node):
                return node.body

        tr = TestTransformer(self._simple_context())

        def test_fn():
            x = 1
            if x > 0:
                x = 1
                x += 3
            return x

        node, source = parser.parse_entity(test_fn, future_features=())
        origin_info.resolve(node, source, 'test_file', 100, 0)
        node = tr.visit(node)

        assign_node = node.body[1]
        aug_assign_node = node.body[2]
        # Keep their original line numbers.
        self.assertEqual(
            anno.getanno(assign_node, anno.Basic.ORIGIN).loc.lineno, 103)
        self.assertEqual(
            anno.getanno(aug_assign_node, anno.Basic.ORIGIN).loc.lineno, 104)
class CodeGeneratorTest(unittest.TestCase):
    """Tests for transformer.CodeGenerator."""

    def _simple_context(self):
        """Build a minimal transformer.Context with no source information."""
        entity_info = transformer.EntityInfo(
            name='test_fn',
            source_code=None,
            source_file=None,
            future_features=(),
            namespace=None)
        return transformer.Context(entity_info, None, None)

    def test_basic_codegen(self):

        class TestCodegen(transformer.CodeGenerator):

            def visit_Assign(self, node):
                self.emit(parser.unparse(node, include_encoding_marker=False))
                self.emit('\n')

            def visit_Return(self, node):
                self.emit(parser.unparse(node, include_encoding_marker=False))
                self.emit('\n')

            def visit_If(self, node):
                self.emit('if ')
                # This is just for simplicity. A real generator will walk the
                # tree and emit proper code.
                self.emit(
                    parser.unparse(node.test, include_encoding_marker=False))
                self.emit(' {\n')
                self.visit_block(node.body)
                self.emit('} else {\n')
                self.visit_block(node.orelse)
                self.emit('}\n')

        tg = TestCodegen(self._simple_context())

        def test_fn():
            x = 1
            if x > 0:
                x = 2
                if x > 1:
                    x = 3
            return x

        node, source = parser.parse_entity(test_fn, future_features=())
        origin_info.resolve(node, source, 'test_file', 100, 0)
        tg.visit(node)

        # The generated pseudo-code should contain these fragments in order;
        # the parenthesization of conditions may vary with the unparser.
        r = re.compile('.*'.join([
            r'x = 1',
            r'if \(?x > 0\)? {',
            r'x = 2',
            r'if \(?x > 1\)? {',
            r'x = 3',
            r'} else {',
            r'}',
            r'} else {',
            r'}',
            r'return x']), re.DOTALL)

        self.assertRegex(tg.code_buffer, r)
        # TODO(mdan): Test the source map.
|
DALI-main
|
dali/test/python/autograph/pyct/test_transformer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cache module."""
import unittest
from nvidia.dali._autograph.pyct import cache
class CacheTest(unittest.TestCase):
    """Tests for the caches in the cache module."""

    def test_code_object_cache(self):

        def factory(x):
            def test_fn():
                return x + 1
            return test_fn

        c = cache.CodeObjectCache()

        f1 = factory(1)
        dummy = object()

        c[f1][1] = dummy

        self.assertTrue(c.has(f1, 1))
        self.assertFalse(c.has(f1, 2))
        self.assertIs(c[f1][1], dummy)
        self.assertEqual(len(c), 1)

        f2 = factory(2)

        # f1 and f2 are distinct closures over the same code object, so the
        # cache entry created through f1 must be visible through f2.
        self.assertTrue(c.has(f2, 1))
        self.assertIs(c[f2][1], dummy)
        self.assertEqual(len(c), 1)

    def test_unbound_instance_cache(self):

        class TestClass(object):
            def method(self):
                pass

        c = cache.UnboundInstanceCache()

        o1 = TestClass()
        dummy = object()

        c[o1.method][1] = dummy

        self.assertTrue(c.has(o1.method, 1))
        self.assertFalse(c.has(o1.method, 2))
        self.assertIs(c[o1.method][1], dummy)
        self.assertEqual(len(c), 1)

        o2 = TestClass()

        # o1.method and o2.method are distinct bound methods of the same
        # function, so they must share one cache entry.
        self.assertTrue(c.has(o2.method, 1))
        self.assertIs(c[o2.method][1], dummy)
        self.assertEqual(len(c), 1)
|
DALI-main
|
dali/test/python/autograph/pyct/test_cache.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reaching_fndefs module."""
import unittest
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import cfg
from nvidia.dali._autograph.pyct import naming
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import transformer
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis import reaching_definitions
from nvidia.dali._autograph.pyct.static_analysis import reaching_fndefs
class ReachingFndefsAnalyzerTest(unittest.TestCase):
    """Tests for the reaching_fndefs static analysis."""

    def _parse_and_analyze(self, test_fn):
        """Parse test_fn and run the full reaching-fndefs pipeline on it."""
        # TODO(mdan): Use a custom FunctionTransformer here.
        node, source = parser.parse_entity(test_fn, future_features=())
        entity_info = transformer.EntityInfo(
            name=test_fn.__name__,
            source_code=source,
            source_file=None,
            future_features=(),
            namespace={})
        node = qual_names.resolve(node)
        namer = naming.Namer({})
        ctx = transformer.Context(entity_info, namer, None)
        # reaching_fndefs requires activity and reaching-definitions results.
        node = activity.resolve(node, ctx)
        graphs = cfg.build(node)
        node = reaching_definitions.resolve(node, ctx, graphs)
        node = reaching_fndefs.resolve(node, ctx, graphs)
        return node

    def assertHasFnDefs(self, node):
        """Assert the analysis annotated node; getanno raises if missing."""
        anno.getanno(node, anno.Static.DEFINED_FNS_IN)
|
DALI-main
|
dali/test/python/autograph/pyct/static_analysis/test_reaching_fndefs.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for activity module."""
import unittest
import gast
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import naming
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import transformer
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis import annos
QN = qual_names.QN
NodeAnno = annos.NodeAnno
global_a = 7
global_b = 17
class ScopeTest(unittest.TestCase):
    """Tests for activity.Scope: copy, merge and parent chaining."""

    def assertMissing(self, qn, scope):
        """Assert qn is neither read nor modified in scope."""
        self.assertNotIn(qn, scope.read)
        self.assertNotIn(qn, scope.modified)

    def assertReadOnly(self, qn, scope):
        """Assert qn is read but not modified in scope."""
        self.assertIn(qn, scope.read)
        self.assertNotIn(qn, scope.modified)

    def assertWriteOnly(self, qn, scope):
        """Assert qn is modified but not read in scope."""
        self.assertNotIn(qn, scope.read)
        self.assertIn(qn, scope.modified)

    def assertReadWrite(self, qn, scope):
        """Assert qn is both read and modified in scope."""
        self.assertIn(qn, scope.read)
        self.assertIn(qn, scope.modified)

    def test_copy_from(self):
        scope = activity.Scope(None)
        scope.modified.add(QN('foo'))
        other = activity.Scope(None)
        other.copy_from(scope)

        self.assertWriteOnly(QN('foo'), other)

        scope.modified.add(QN('bar'))
        scope.copy_from(other)

        # copy_from overwrites, so 'bar' must be gone after copying back.
        self.assertMissing(QN('bar'), scope)

    def test_merge_from(self):
        scope = activity.Scope(None)
        other = activity.Scope(None)

        for col in (scope.modified, scope.read, scope.bound, scope.deleted):
            col.add(QN('foo'))

        for col in (other.modified, other.read, other.bound, other.deleted):
            col.add(QN('foo'))
            col.add(QN('bar'))

        scope.merge_from(other)

        # merge_from unions, so both symbols are present in every set.
        self.assertReadWrite(QN('foo'), scope)
        self.assertReadWrite(QN('bar'), scope)
        self.assertIn(QN('foo'), scope.bound)
        self.assertIn(QN('bar'), scope.bound)
        self.assertIn(QN('foo'), scope.deleted)
        self.assertIn(QN('bar'), scope.deleted)

    def test_copy_of(self):
        scope = activity.Scope(None)
        scope.read.add(QN('foo'))
        other = activity.Scope.copy_of(scope)

        self.assertReadOnly(QN('foo'), other)

        child_scope = activity.Scope(scope)
        child_scope.read.add(QN('bar'))
        other = activity.Scope.copy_of(child_scope)

        self.assertReadOnly(QN('bar'), other)

    def test_referenced(self):
        scope = activity.Scope(None)
        scope.read.add(QN('a'))
        child = activity.Scope(scope)
        child.read.add(QN('b'))
        child2 = activity.Scope(child, isolated=False)
        child2.read.add(QN('c'))

        # finalize innermost-first so reads propagate up the parent chain.
        child2.finalize()
        child.finalize()
        scope.finalize()

        self.assertIn(QN('c'), child2.referenced)
        self.assertIn(QN('b'), child2.referenced)
        self.assertIn(QN('a'), child2.referenced)

        self.assertIn(QN('c'), child.referenced)
        self.assertIn(QN('b'), child.referenced)
        self.assertIn(QN('a'), child.referenced)
class ActivityAnalyzerTestBase(unittest.TestCase):
    """Shared parsing/analysis helpers for activity analyzer tests."""

    def _parse_and_analyze(self, test_fn):
        """Parse test_fn and run the activity analysis on it."""
        # TODO(mdan): Use a custom FunctionTransformer here.
        node, source = parser.parse_entity(test_fn, future_features=())
        entity_info = transformer.EntityInfo(
            name=test_fn.__name__,
            source_code=source,
            source_file=None,
            future_features=(),
            namespace={})
        node = qual_names.resolve(node)
        namer = naming.Namer({})
        ctx = transformer.Context(entity_info, namer, None)
        node = activity.resolve(node, ctx)
        return node, entity_info

    def assertSymbolSetsAre(self, expected, actual, name):
        """Assert two symbol sets match, with a diff-style failure message."""
        expected = set(expected)
        actual = set(str(s) for s in actual)
        self.assertSetEqual(
            expected, actual, 'for symbol set: %s\n'
            '  Expected: %s\n'
            '  Got:      %s\n'
            '  Missing:  %s\n'
            '  Extra:    %s\n' % (name.upper(), expected, actual,
                                  expected - actual, actual - expected))

    def assertScopeIs(self, scope, used, modified):
        """Assert the scope contains specific used, modified & created variables."""
        self.assertSymbolSetsAre(used, scope.read, 'read')
        self.assertSymbolSetsAre(modified, scope.modified, 'modified')
class ActivityAnalyzerTest(ActivityAnalyzerTestBase):
def test_import(self):
def test_fn():
import a, b.x, y as c, z.u as d # pylint:disable=g-multiple-import,g-import-not-at-top,unused-variable # noqa: E401,F401,E501
node, _ = self._parse_and_analyze(test_fn)
scope = anno.getanno(node.body[0], anno.Static.SCOPE)
self.assertScopeIs(scope, (), ('a', 'b', 'c', 'd'))
def test_import_from(self):
def test_fn():
from x import a # pylint:disable=g-import-not-at-top,unused-variable # noqa: F401
from y import z as b # pylint:disable=g-import-not-at-top,unused-variable # noqa: F401
node, _ = self._parse_and_analyze(test_fn)
scope = anno.getanno(node.body[0], anno.Static.SCOPE)
self.assertScopeIs(scope, (), ('a',))
scope = anno.getanno(node.body[1], anno.Static.SCOPE)
self.assertScopeIs(scope, (), ('b',))
def test_print_statement(self):
def test_fn(a):
b = 0
c = 1
print(a, b)
return c
node, _ = self._parse_and_analyze(test_fn)
print_node = node.body[2]
if isinstance(print_node, gast.Print):
# Python 2
print_args_scope = anno.getanno(print_node, NodeAnno.ARGS_SCOPE)
else:
# Python 3
assert isinstance(print_node, gast.Expr)
# The call node should be the one being annotated.
print_node = print_node.value
print_args_scope = anno.getanno(print_node, NodeAnno.ARGS_SCOPE)
# We basically need to detect which variables are captured by the call
# arguments.
self.assertScopeIs(print_args_scope, ('a', 'b'), ())
def test_call_args(self):
def test_fn(a):
b = 0
c = 1
foo(a, b) # pylint:disable=undefined-variable # noqa: F821
return c
node, _ = self._parse_and_analyze(test_fn)
call_node = node.body[2].value
# We basically need to detect which variables are captured by the call
# arguments.
self.assertScopeIs(
anno.getanno(call_node, NodeAnno.ARGS_SCOPE), ('a', 'b'), ())
def test_call_args_attributes(self):
def foo(*_):
pass
def test_fn(a):
a.c = 0
foo(a.b, a.c)
return a.d
node, _ = self._parse_and_analyze(test_fn)
call_node = node.body[1].value
self.assertScopeIs(
anno.getanno(call_node, NodeAnno.ARGS_SCOPE), ('a', 'a.b', 'a.c'), ())
def test_call_args_subscripts(self):
def foo(*_):
pass
def test_fn(a):
b = 1
c = 2
foo(a[0], a[b])
return a[c]
node, _ = self._parse_and_analyze(test_fn)
call_node = node.body[2].value
self.assertScopeIs(
anno.getanno(call_node, NodeAnno.ARGS_SCOPE),
('a', 'a[0]', 'a[b]', 'b'), ())
def test_while(self):
def test_fn(a):
b = a
while b > 0:
c = b
b -= 1
return b, c
node, _ = self._parse_and_analyze(test_fn)
while_node = node.body[1]
self.assertScopeIs(
anno.getanno(while_node, NodeAnno.BODY_SCOPE), ('b',), ('b', 'c'))
self.assertScopeIs(
anno.getanno(while_node, NodeAnno.BODY_SCOPE).parent, ('a', 'b', 'c'),
('b', 'c'))
self.assertScopeIs(
anno.getanno(while_node, NodeAnno.COND_SCOPE), ('b',), ())
def test_for(self):
def test_fn(a):
b = a
for _ in a:
c = b
b -= 1
return b, c
node, _ = self._parse_and_analyze(test_fn)
for_node = node.body[1]
self.assertScopeIs(
anno.getanno(for_node, NodeAnno.ITERATE_SCOPE), (), ('_'))
self.assertScopeIs(
anno.getanno(for_node, NodeAnno.BODY_SCOPE), ('b',), ('b', 'c'))
self.assertScopeIs(
anno.getanno(for_node, NodeAnno.BODY_SCOPE).parent, ('a', 'b', 'c'),
('b', 'c', '_'))
def test_if(self):
def test_fn(x):
if x > 0:
x = -x
y = 2 * x
z = -y
else:
x = 2 * x
y = -x
u = -y
return z, u
node, _ = self._parse_and_analyze(test_fn)
if_node = node.body[0]
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('x', 'y'), ('x', 'y', 'z'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.BODY_SCOPE).parent, ('x', 'y', 'z', 'u'),
('x', 'y', 'z', 'u'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('x', 'y'),
('x', 'y', 'u'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE).parent,
('x', 'y', 'z', 'u'), ('x', 'y', 'z', 'u'))
def test_if_attributes(self):
def test_fn(a):
if a > 0:
a.b = -a.c
d = 2 * a
else:
a.b = a.c
d = 1
return d
node, _ = self._parse_and_analyze(test_fn)
if_node = node.body[0]
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('a', 'a.c'), ('a.b', 'd'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('a', 'a.c'),
('a.b', 'd'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.BODY_SCOPE).parent, ('a', 'a.c', 'd'),
('a.b', 'd'))
def test_if_subscripts(self):
def test_fn(a, b, c, e):
if a > 0:
a[b] = -a[c]
d = 2 * a
else:
a[0] = e
d = 1
return d
node, _ = self._parse_and_analyze(test_fn)
if_node = node.body[0]
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.BODY_SCOPE), ('a', 'b', 'c', 'a[c]'),
('a[b]', 'd'))
# TODO(mdan): Should subscript writes (a[0] = 1) be considered to read "a"?
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE), ('a', 'e'), ('a[0]', 'd'))
self.assertScopeIs(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE).parent,
('a', 'b', 'c', 'd', 'e', 'a[c]'), ('d', 'a[b]', 'a[0]'))
def test_nested_if(self):
def test_fn(b):
if b > 0:
if b < 5:
a = b
else:
a = b * b
return a
node, _ = self._parse_and_analyze(test_fn)
inner_if_node = node.body[0].body[0]
self.assertScopeIs(
anno.getanno(inner_if_node, NodeAnno.BODY_SCOPE), ('b',), ('a',))
self.assertScopeIs(
anno.getanno(inner_if_node, NodeAnno.ORELSE_SCOPE), ('b',), ('a',))
def test_nested_function(self):
def test_fn(a):
def f(x):
y = x * x
return y
return f(a)
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'f'), ('f',))
fn_def_node = node.body[0]
scope = anno.getanno(fn_def_node, anno.Static.SCOPE)
self.assertScopeIs(scope, (), ('f'))
scope = anno.getanno(fn_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('x', 'y'), ('y',))
scope = anno.getanno(fn_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('x', 'y'), ('y',))
self.assertSymbolSetsAre(('x', 'y'), scope.bound, 'BOUND')
def test_nested_lambda(self):
def test_fn(a):
return lambda x: (x * a)
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a',), ())
return_node = node.body[0]
scope = anno.getanno(return_node, anno.Static.SCOPE)
self.assertScopeIs(scope, ('a',), ())
lam_def_node = return_node.value
scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'x'), ())
scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'x'), ())
self.assertSymbolSetsAre(('x',), scope.bound, 'BOUND')
def test_nested_function_arg_defaults(self):
def test_fn(a):
def f(x=a):
y = x * x
return y
return f(a)
node, _ = self._parse_and_analyze(test_fn)
fn_def_node = node.body[0]
self.assertScopeIs(
anno.getanno(fn_def_node, anno.Static.SCOPE), ('a',), ('f',))
scope = anno.getanno(fn_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('x', 'y'), ('y',))
scope = anno.getanno(fn_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('x', 'y'), ('y',))
self.assertSymbolSetsAre(('x', 'y'), scope.bound, 'BOUND')
def test_constructor_attributes(self):
class TestClass(object):
def __init__(self, a):
self.b = a
self.b.c = 1
node, _ = self._parse_and_analyze(TestClass)
init_node = node.body[0]
self.assertScopeIs(
anno.getanno(init_node, NodeAnno.BODY_SCOPE), ('self', 'a', 'self.b'),
('self', 'self.b', 'self.b.c'))
def test_aug_assign_subscripts(self):
def test_fn(a):
a[0] += 1
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(
anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a', 'a[0]'), ('a[0]',))
def test_return_vars_are_read(self):
def test_fn(a, b, c): # pylint: disable=unused-argument # noqa: F841
return c
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('c',), ())
self.assertScopeIs(
anno.getanno(node.body[0], anno.Static.SCOPE), ('c',), ())
def test_raise_names_are_read(self):
def test_fn(a, b, c): # pylint: disable=unused-argument # noqa: F841
raise b
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('b',), ())
self.assertScopeIs(
anno.getanno(node.body[0], anno.Static.SCOPE), ('b',), ())
def test_except_exposes_names(self):
def test_fn(a, b, c): # pylint: disable=unused-argument
try:
pass
except: # pylint: disable=bare-except # noqa: E722
b = c # noqa: F841
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(
anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('c',), ('b',))
def test_except_hides_exception_var_name(self):
def test_fn(a, b, c): # pylint: disable=unused-argument
try:
pass
except a as e:
b = e # noqa: F841
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(
anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a',), ('b',))
def test_aug_assign(self):
def test_fn(a, b):
a += b
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(
anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('a', 'b'), ('a'))
def test_aug_assign_rvalues(self):
a = dict(bar=3)
def foo():
return a
def test_fn(x):
foo()['bar'] += x
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
self.assertScopeIs(
anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('foo', 'x'), ())
def test_lambda(self):
def test_fn(a, b):
return lambda: (a + b)
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
lam_def_node = node.body[0].value
scope = anno.getanno(lam_def_node, anno.Static.SCOPE)
self.assertScopeIs(scope, (), ())
scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
self.assertSymbolSetsAre((), scope.bound, 'BOUND')
scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE)
self.assertSymbolSetsAre((), scope.params.keys(), 'lambda params')
def test_lambda_params_args(self):
def test_fn(a, b): # pylint: disable=unused-argument # noqa: F841
return lambda a: a + b
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
# Note: `a` in `a + b` is not "read" here because it's hidden by the `a`
# argument.
self.assertScopeIs(scope, ('b',), ())
lam_def_node = node.body[0].value
scope = anno.getanno(lam_def_node, anno.Static.SCOPE)
self.assertScopeIs(scope, (), ())
scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
self.assertSymbolSetsAre(('a',), scope.bound, 'BOUND')
scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE)
self.assertSymbolSetsAre(('a',), scope.params.keys(), 'lambda params')
def test_lambda_params_arg_defaults(self):
def test_fn(a, b, c): # pylint: disable=unused-argument # noqa: F841
return lambda b=c: a + b
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
# Note: `b` is not "read" here because it's hidden by the argument.
self.assertScopeIs(scope, ('a', 'c'), ())
lam_def_node = node.body[0].value
scope = anno.getanno(lam_def_node, anno.Static.SCOPE)
self.assertScopeIs(scope, ('c',), ())
scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b'), ())
self.assertSymbolSetsAre(('b',), scope.bound, 'BOUND')
scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE)
self.assertSymbolSetsAre(('b',), scope.params.keys(), 'lambda params')
def test_lambda_complex(self):
def test_fn(a, b, c, d, e): # pylint: disable=unused-argument # noqa: F841
a = (lambda a, b, c=e: a + b + c)(d, 1, 2) + b # noqa: F841
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('d', 'b', 'e'), ('a',))
lam_def_node = node.body[0].value.left.func
scope = anno.getanno(lam_def_node, anno.Static.SCOPE)
self.assertScopeIs(scope, ('e',), ())
scope = anno.getanno(lam_def_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b', 'c'), ())
scope = anno.getanno(lam_def_node, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b', 'c'), ())
self.assertSymbolSetsAre(('a', 'b', 'c'), scope.bound, 'BOUND')
scope = anno.getanno(lam_def_node.args, anno.Static.SCOPE)
self.assertSymbolSetsAre(
('a', 'b', 'c'), scope.params.keys(), 'lambda params')
def test_lambda_nested(self):
def test_fn(a, b, c, d, e, f): # pylint: disable=unused-argument # noqa: F841
a = lambda a, b: d(lambda b=f: a + b + c) # pylint: disable=undefined-variable # noqa: F841
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('d', 'c', 'f'), ('a',))
outer_lam_def = node.body[0].value
scope = anno.getanno(outer_lam_def, anno.Static.SCOPE)
self.assertScopeIs(scope, (), ())
scope = anno.getanno(outer_lam_def, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('d', 'f', 'a', 'c'), ())
scope = anno.getanno(outer_lam_def, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('d', 'f', 'a', 'c'), ())
self.assertSymbolSetsAre(('a', 'b'), scope.bound, 'BOUND')
scope = anno.getanno(outer_lam_def.args, anno.Static.SCOPE)
self.assertSymbolSetsAre(('a', 'b'), scope.params.keys(), 'lambda params')
inner_lam_def = outer_lam_def.body.args[0]
scope = anno.getanno(inner_lam_def, anno.Static.SCOPE)
self.assertScopeIs(scope, ('f',), ())
scope = anno.getanno(inner_lam_def, NodeAnno.BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b', 'c'), ())
scope = anno.getanno(inner_lam_def, NodeAnno.ARGS_AND_BODY_SCOPE)
self.assertScopeIs(scope, ('a', 'b', 'c'), ())
self.assertSymbolSetsAre(('b',), scope.bound, 'BOUND')
scope = anno.getanno(inner_lam_def.args, anno.Static.SCOPE)
self.assertSymbolSetsAre(('b',), scope.params.keys(), 'lambda params')
def test_comprehension_targets_are_isolated(self):
def test_fn(a):
b = {c for c in a} # pylint:disable=unused-variable # noqa: F841
node, _ = self._parse_and_analyze(test_fn)
fn_node = node
body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
self.assertScopeIs(body_scope, ('a',), ('b',))
    def test_comprehension_targets_are_isolated_list_function_w_generator(self):
        """Generator-expression targets do not leak, even when passed to list()."""

        def test_fn(a):
            b = list(c for c in a)  # pylint:disable=unused-variable # noqa: F841

        node, _ = self._parse_and_analyze(test_fn)
        fn_node = node
        body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
        # list is a read (the builtin is a free symbol here); c stays isolated.
        self.assertScopeIs(body_scope, ('a', 'list'), ('b',))
    def test_list_comprehension_targets_are_sometimes_isolated(self):
        """List-comprehension targets do not leak under Python 3 semantics."""

        def test_fn(a):
            b = [c for c in a]  # pylint:disable=unused-variable # noqa: F841

        node, _ = self._parse_and_analyze(test_fn)
        fn_node = node
        body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
        self.assertScopeIs(body_scope, ('a',), ('b',))
    def test_comprehension_targets_are_isolated_in_augassign(self):
        """Comprehension targets stay isolated when used in an augmented assign."""

        def test_fn(a, b):
            b += [c for c in a]  # pylint:disable=unused-variable

        node, _ = self._parse_and_analyze(test_fn)
        fn_node = node
        body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
        # b appears both as read and as write because += is a read-modify-write.
        self.assertScopeIs(body_scope, ('a', 'b'), ('b',))
    def test_comprehension_generator_order(self):
        """Later generators may use targets bound by earlier ones without leaking."""

        def test_fn(a, b, c):  # pylint:disable=unused-argument
            e = {d: (a, b) for (a, b) in c for d in b}  # pylint:disable=unused-variable,g-complex-comprehension # noqa: F841,E501

        node, _ = self._parse_and_analyze(test_fn)
        fn_node = node
        body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
        # Only c is read from the enclosing scope; a, b, d are comprehension-local.
        self.assertScopeIs(body_scope, ('c',), ('e',))
    def test_global_symbol(self):
        """Symbols declared global are tracked both as reads/writes and as globals."""

        def test_fn(c):
            global global_a
            global global_b
            global_a = global_b + c

        node, _ = self._parse_and_analyze(test_fn)
        fn_node = node
        body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
        self.assertScopeIs(body_scope, ('global_a', 'global_b', 'c'), ('global_a',))
        # Both declared names are recorded in the scope's globals set.
        self.assertSetEqual(body_scope.globals, set(
            (QN('global_a'), QN('global_b'))))
        global_a_scope = anno.getanno(fn_node.body[0], anno.Static.SCOPE)
        self.assertScopeIs(global_a_scope, ('global_a',), ())
    def test_nonlocal_symbol(self):
        """Symbols declared nonlocal are tracked as reads and writes."""
        # Closure cells the fixture's nonlocal statements refer to.
        nonlocal_a = 3
        nonlocal_b = 13

        def test_fn(c):
            nonlocal nonlocal_a
            nonlocal nonlocal_b
            nonlocal_a = nonlocal_b + c

        node, _ = self._parse_and_analyze(test_fn)
        fn_node = node
        body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
        self.assertScopeIs(
            body_scope, ('nonlocal_a', 'nonlocal_b', 'c'), ('nonlocal_a',))
        nonlocal_a_scope = anno.getanno(fn_node.body[0], anno.Static.SCOPE)
        self.assertScopeIs(nonlocal_a_scope, ('nonlocal_a',), ())
    def test_annotated_assign(self):
        """Annotated assignment records the annotation symbol separately."""
        # b is the annotation symbol referenced by the fixture.
        b = int

        def test_fn(c):
            a: b = c
            return a

        node, _ = self._parse_and_analyze(test_fn)
        fn_node = node
        body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
        self.assertScopeIs(body_scope, ('b', 'c', 'a'), ('a',))
        # The annotation b is tracked in a dedicated annotations set.
        self.assertSymbolSetsAre(('b',), body_scope.annotations, 'annotations')
        ann_assign_scope = anno.getanno(fn_node.body[0], anno.Static.SCOPE)
        self.assertScopeIs(ann_assign_scope, ('b', 'c'), ('a',))
        self.assertSymbolSetsAre(
            ('b',), ann_assign_scope.annotations, 'annotations')
    def test_pure_definition(self):
        """A bare annotation (no value) still counts as a write of the target."""
        b = int

        def test_fn():
            a: b
            return a  # noqa: F821

        node, _ = self._parse_and_analyze(test_fn)
        fn_node = node
        body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
        self.assertScopeIs(body_scope, ('b', 'a'), ('a',))
        self.assertSymbolSetsAre(('b',), body_scope.annotations, 'annotations')
        ann_assign_scope = anno.getanno(fn_node.body[0], anno.Static.SCOPE)
        self.assertScopeIs(ann_assign_scope, ('b',), ('a',))
        self.assertSymbolSetsAre(
            ('b',), ann_assign_scope.annotations, 'annotations')
    def test_function_def_annotations(self):
        """Parameter/return annotations belong to the defining scope, not the body."""
        b = int
        c = int

        def test_fn(a: b) -> c:
            return a

        node, _ = self._parse_and_analyze(test_fn)
        fn_node = node
        fn_scope = anno.getanno(fn_node, anno.Static.SCOPE)
        # The def itself reads the annotation symbols and writes the fn name.
        self.assertScopeIs(fn_scope, ('b', 'c'), ('test_fn',))
        self.assertSymbolSetsAre(('b', 'c'), fn_scope.annotations, 'annotations')
        body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
        # The body itself neither reads nor writes the annotation symbols.
        self.assertScopeIs(body_scope, ('a',), ())
        self.assertSymbolSetsAre((), body_scope.annotations, 'annotations')
    def test_class_definition_basic(self):
        """A class definition reads its base expression and writes the class name."""

        def test_fn(a, b):
            class C(a(b)):
                d = 1
            return C

        node, _ = self._parse_and_analyze(test_fn)
        fn_node = node
        body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
        # C is both read (by the return) and written (by the class statement).
        self.assertScopeIs(body_scope, ('a', 'b', 'C'), ('C',))
    def test_class_definition_isolates_method_writes(self):
        """Writes inside a method (f) do not escape into the enclosing function."""

        def test_fn(a, b, c):
            class C(a(b)):
                d = 1

                def e(self):
                    f = c + 1
                    return f
            return C

        node, _ = self._parse_and_analyze(test_fn)
        fn_node = node
        body_scope = anno.getanno(fn_node, NodeAnno.BODY_SCOPE)
        # c is read (closed over by the method); f never appears at this level.
        self.assertScopeIs(body_scope, ('a', 'b', 'C', 'c'), ('C',))
# --- end of extracted file (repo: DALI-main,
#     path: dali/test/python/autograph/pyct/static_analysis/test_activity.py) ---
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --- end of extracted file (repo: DALI-main,
#     path: dali/test/python/autograph/pyct/static_analysis/__init__.py) ---
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reaching_definitions module."""
import unittest
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import cfg
from nvidia.dali._autograph.pyct import naming
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import transformer
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis import reaching_definitions
# Module-level symbols targeted by the `global` statements in the tests below.
global_a = 7
global_b = 17
class ReachingDefinitionsAnalyzerTestBase(unittest.TestCase):
    """Shared helpers for the reaching-definitions analysis tests."""

    def _parse_and_analyze(self, test_fn):
        """Parses test_fn and runs the reaching-definitions analysis over it."""
        # TODO(mdan): Use a custom FunctionTransformer here.
        ast_node, source = parser.parse_entity(test_fn, future_features=())
        entity_info = transformer.EntityInfo(
            name=test_fn.__name__,
            source_code=source,
            source_file=None,
            future_features=(),
            namespace={})
        ctx = transformer.Context(entity_info, naming.Namer({}), None)
        ast_node = qual_names.resolve(ast_node)
        ast_node = activity.resolve(ast_node, ctx)
        graphs = cfg.build(ast_node)
        return reaching_definitions.resolve(
            ast_node, ctx, graphs, reaching_definitions.Definition)

    def assertHasDefs(self, node, num):
        """Checks that node is annotated with exactly num reaching definitions."""
        defs = anno.getanno(node, anno.Static.DEFINITIONS)
        self.assertEqual(len(defs), num)
        for d in defs:
            self.assertIsInstance(d, reaching_definitions.Definition)

    def assertHasDefinedIn(self, node, expected):
        """Checks the set of symbols defined on entry into node.

        expected may be None/empty, a single symbol name, or a tuple of names.
        """
        defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
        if not expected:
            expected_strs = set()
        elif isinstance(expected, tuple):
            expected_strs = set(expected)
        else:
            expected_strs = {expected}
        self.assertSetEqual({str(v) for v in defined_in}, expected_strs)

    def assertSameDef(self, first, second):
        """Checks that first and second resolve to the same single definition."""
        self.assertHasDefs(first, 1)
        self.assertHasDefs(second, 1)
        def_of_first = anno.getanno(first, anno.Static.DEFINITIONS)[0]
        def_of_second = anno.getanno(second, anno.Static.DEFINITIONS)[0]
        self.assertIs(def_of_first, def_of_second)

    def assertNotSameDef(self, first, second):
        """Checks that first and second resolve to distinct definitions."""
        self.assertHasDefs(first, 1)
        self.assertHasDefs(second, 1)
        def_of_first = anno.getanno(first, anno.Static.DEFINITIONS)[0]
        def_of_second = anno.getanno(second, anno.Static.DEFINITIONS)[0]
        self.assertIsNot(def_of_first, def_of_second)
class ReachingDefinitionsAnalyzerTest(ReachingDefinitionsAnalyzerTestBase):
    """Tests for reaching-definitions analysis over assorted control flow.

    The nested ``test_fn`` functions are fixtures whose source is parsed and
    analyzed; the assertions index into the resulting AST by statement
    position, so fixture statement order is load-bearing.
    """

    def test_conditional(self):

        def test_fn(a, b):
            a = []
            if b:
                a = []
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefs(fn_body[0].targets[0], 1)
        self.assertHasDefs(fn_body[1].test, 1)
        self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
        # The return may see either the initial assignment or the one in the if.
        self.assertHasDefs(fn_body[2].value, 2)

        self.assertHasDefinedIn(fn_body[1], ('a', 'b'))

    def test_try_in_conditional(self):

        def test_fn(a, b):  # pylint:disable=unused-argument
            a = []
            if b:
                try:
                    pass
                except:  # pylint:disable=bare-except # noqa: E722
                    pass
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
        self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))

    def test_conditional_in_try_in_conditional(self):

        def test_fn(a, b):
            a = []
            if b:
                try:
                    if b:
                        a = []
                except TestException:  # pylint:disable=undefined-variable,unused-variable # noqa: F821,F841
                    pass
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
        self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))
        # Note: `TestException` and `e` are not tracked.
        self.assertHasDefinedIn(fn_body[1].body[0].body[0], ('a', 'b'))

    def test_conditional_in_except_in_conditional(self):

        def test_fn(a, b):
            a = []
            if b:
                try:
                    pass
                except TestException as e:  # pylint:disable=undefined-variable,unused-variable # noqa: F821,F841,E501
                    if b:
                        a = []
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
        self.assertHasDefinedIn(fn_body[1].body[0], ('a', 'b'))
        # Note: `TestException` and `e` are not tracked.
        self.assertHasDefinedIn(fn_body[1].body[0].handlers[0].body[0], ('a', 'b'))

    def test_while(self):

        def test_fn(a):
            max(a)
            while True:
                a = a
                a = a
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefs(fn_body[0].value.args[0], 1)
        self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
        self.assertHasDefs(fn_body[1].body[1].targets[0], 1)
        self.assertHasDefs(fn_body[1].body[1].value, 1)
        # The loop does have an invariant test, but the CFG doesn't know that.
        self.assertHasDefs(fn_body[1].body[0].value, 2)
        self.assertHasDefs(fn_body[2].value, 2)

    def test_while_else(self):

        def test_fn(x, i):
            y = 0
            while x:
                x += i
                if i:
                    break
            else:
                y = 1
            return x, y

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefs(fn_body[0].targets[0], 1)
        self.assertHasDefs(fn_body[1].test, 2)
        self.assertHasDefs(fn_body[1].body[0].target, 1)
        self.assertHasDefs(fn_body[1].body[1].test, 1)
        self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
        self.assertHasDefs(fn_body[2].value.elts[0], 2)
        self.assertHasDefs(fn_body[2].value.elts[1], 2)

    def test_for_else(self):

        def test_fn(x, i):
            y = 0
            for i in x:
                x += i
                if i:
                    break
                else:
                    continue
            else:
                y = 1
            return x, y

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefs(fn_body[0].targets[0], 1)
        self.assertHasDefs(fn_body[1].target, 1)
        self.assertHasDefs(fn_body[1].body[0].target, 1)
        self.assertHasDefs(fn_body[1].body[1].test, 1)
        self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
        self.assertHasDefs(fn_body[2].value.elts[0], 2)
        self.assertHasDefs(fn_body[2].value.elts[1], 2)

    def test_nested_functions(self):

        def test_fn(a, b):
            a = []
            if b:
                a = []

                def foo():
                    return a

                foo()
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body
        def_of_a_in_if = fn_body[1].body[0].targets[0]

        self.assertHasDefs(fn_body[0].targets[0], 1)
        self.assertHasDefs(fn_body[1].test, 1)
        self.assertHasDefs(def_of_a_in_if, 1)
        self.assertHasDefs(fn_body[2].value, 2)

        inner_fn_body = fn_body[1].body[1].body
        def_of_a_in_foo = inner_fn_body[0].value
        # Even though `a` is visible in the inner function above, the late binding
        # makes it impossible to assume that the same value will be visible at
        # call time.
        self.assertHasDefs(def_of_a_in_foo, 0)

    def test_nested_functions_isolation(self):

        def test_fn(a):
            a = 0

            def child():
                a = 1
                return a

            child()
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        parent_return = fn_body[3]
        child_return = fn_body[1].body[1]
        # The assignment `a = 1` makes `a` local to `child`.
        self.assertNotSameDef(parent_return.value, child_return.value)

    def test_function_call_in_with(self):

        def foo(_):
            pass

        def test_fn(a):
            with foo(a):
                return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # foo is a free symbol here, hence no reaching definition.
        self.assertHasDefs(fn_body[0].items[0].context_expr.func, 0)
        self.assertHasDefs(fn_body[0].items[0].context_expr.args[0], 1)

    def test_mutation_subscript(self):

        def test_fn(a):
            l = []
            l[0] = a
            return l

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        creation = fn_body[0].targets[0]
        mutation = fn_body[1].targets[0].value
        use = fn_body[2].value
        # Subscript assignment mutates l in place; it does not redefine it.
        self.assertSameDef(creation, mutation)
        self.assertSameDef(creation, use)

    def test_deletion_partial(self):

        def test_fn(a):
            a = 0
            if a:
                del a
            else:
                a = 1
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        first_def = fn_body[0].targets[0]
        second_def = fn_body[1].orelse[0].targets[0]
        use = fn_body[2].value
        # Only the else-branch definition can reach the return, because the
        # if-branch deletes a.
        self.assertNotSameDef(use, first_def)
        self.assertSameDef(use, second_def)

    def test_deletion_total(self):

        def test_fn(a):
            if a:
                a = 0
            else:
                a = 1
            del a
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        use = fn_body[2].value
        # The unconditional del leaves no reaching definition for the return.
        self.assertHasDefs(use, 0)

    def test_replacement(self):

        def foo(a):
            return a

        def test_fn(a):
            a = foo(a)
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        param = node.args.args[0]
        source = fn_body[0].value.args[0]
        target = fn_body[0].targets[0]
        retval = fn_body[1].value
        self.assertSameDef(param, source)
        # The assignment introduces a new definition that replaces the parameter.
        self.assertNotSameDef(source, target)
        self.assertSameDef(target, retval)

    def test_comprehension_leaking(self):

        def test_fn(a):
            _ = [x for x in a]
            return x  # pylint:disable=undefined-loop-variable # noqa: F821

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        listcomp_target = fn_body[0].value.generators[0].target  # noqa: F841
        retval = fn_body[1].value

        # Python2 leaks list comprehension symbols. Python3 doesn't.
        # For details, see:
        # https://stackoverflow.com/questions/4198906/list-comprehension-rebinds-names-even-after-scope-of-comprehension-is-this-righ
        self.assertHasDefs(retval, 0)

    def test_function_definition(self):

        def test_fn():

            def a():
                pass

            if a:  # pylint:disable=using-constant-test
                a = None
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # The def statement itself counts as a definition of a.
        self.assertHasDefs(fn_body[1].test, 1)
        self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
        self.assertHasDefs(fn_body[2].value, 2)

        self.assertHasDefinedIn(fn_body[1], ('a',))

    def test_definitions_in_except_block(self):

        def test_fn():
            try:
                pass
            except ValueError:
                a = None
            if a:  # pylint:disable=using-constant-test
                a = None
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefs(fn_body[1].test, 1)
        self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
        self.assertHasDefs(fn_body[2].value, 2)

        self.assertHasDefinedIn(fn_body[1], ('a',))

    def test_definitions_in_except_block_of_raising_try(self):

        def test_fn():
            try:
                raise ValueError()
            except ValueError:
                a = None
            if a:  # pylint:disable=using-constant-test
                a = None
            return a

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefs(fn_body[1].test, 1)
        self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
        self.assertHasDefs(fn_body[2].value, 2)

        self.assertHasDefinedIn(fn_body[1], ('a',))

    def test_global(self):

        def test_fn():
            global global_a
            global global_b
            if global_a:
                global_b = []
            return global_a, global_b

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefs(fn_body[2].test, 1)
        self.assertHasDefs(fn_body[2].body[0].targets[0], 1)
        self.assertHasDefs(fn_body[3].value.elts[0], 1)
        self.assertHasDefs(fn_body[3].value.elts[1], 2)

        self.assertSameDef(fn_body[2].test, fn_body[3].value.elts[0])

        self.assertHasDefinedIn(fn_body[2], ('global_a', 'global_b'))

    def test_nonlocal(self):
        # Closure cells targeted by the fixture's nonlocal statements.
        a = 3
        b = 13

        def test_fn():
            nonlocal a
            nonlocal b
            if a:
                b = []
            return a, b

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefs(fn_body[2].test, 1)
        self.assertHasDefs(fn_body[2].body[0].targets[0], 1)
        self.assertHasDefs(fn_body[3].value.elts[0], 1)
        self.assertHasDefs(fn_body[3].value.elts[1], 2)

        self.assertSameDef(fn_body[2].test, fn_body[3].value.elts[0])

        self.assertHasDefinedIn(fn_body[2], ('a', 'b'))

    def test_nonlocal_in_nested_function(self):
        a = 3
        b = 13

        def test_fn():
            a = 3
            b = 13

            def local_fn():
                nonlocal a, b
                if a:
                    b = []
                return a, b

            return local_fn()

        node = self._parse_and_analyze(test_fn)
        local_body = node.body[2].body

        self.assertHasDefs(local_body[1].test, 1)
        self.assertHasDefs(local_body[1].body[0].targets[0], 1)
        self.assertHasDefs(local_body[2].value.elts[0], 1)
        self.assertHasDefs(local_body[2].value.elts[1], 2)

        self.assertSameDef(local_body[1].test, local_body[2].value.elts[0])

        # Note: the function name is visible inside the function body. But it's
        # a closure variable, not a local.
        #
        # Example:
        #
        # >>> def f():
        # ...   print(f)
        # >>> g = f
        # >>> f = 'something else'
        # >>> g()
        # something else
        #
        self.assertHasDefinedIn(local_body[1], ('a', 'b'))
class ReachingDefinitionsAnalyzerTestPy3(ReachingDefinitionsAnalyzerTestBase):
    """Tests which can only run in Python 3."""

    # NOTE(review): these tests duplicate test_nonlocal and
    # test_nonlocal_in_nested_function in ReachingDefinitionsAnalyzerTest —
    # presumably a leftover from when the main suite also ran under Python 2;
    # confirm before removing either copy.

    def test_nonlocal(self):
        # Closure cells targeted by the fixture's nonlocal statements.
        a = 3
        b = 13

        def test_fn():
            nonlocal a
            nonlocal b
            if a:
                b = []
            return a, b

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasDefs(fn_body[2].test, 1)
        self.assertHasDefs(fn_body[2].body[0].targets[0], 1)
        self.assertHasDefs(fn_body[3].value.elts[0], 1)
        self.assertHasDefs(fn_body[3].value.elts[1], 2)

        self.assertSameDef(fn_body[2].test, fn_body[3].value.elts[0])

        self.assertHasDefinedIn(fn_body[2], ('a', 'b'))

    def test_nonlocal_in_nested_function(self):
        a = 3
        b = 13

        def test_fn():
            a = 3
            b = 13

            def local_fn():
                nonlocal a, b
                if a:
                    b = []
                return a, b

            return local_fn()

        node = self._parse_and_analyze(test_fn)
        local_body = node.body[2].body

        self.assertHasDefs(local_body[1].test, 1)
        self.assertHasDefs(local_body[1].body[0].targets[0], 1)
        self.assertHasDefs(local_body[2].value.elts[0], 1)
        self.assertHasDefs(local_body[2].value.elts[1], 2)

        self.assertSameDef(local_body[1].test, local_body[2].value.elts[0])

        # Note: the function name is visible inside the function body. But it's
        # a closure variable, not a local.
        #
        # Example:
        #
        # >>> def f():
        # ...   print(f)
        # >>> g = f
        # >>> f = 'something else'
        # >>> g()
        # something else
        #
        self.assertHasDefinedIn(local_body[1], ('a', 'b'))
# --- end of extracted file (repo: DALI-main,
#     path: dali/test/python/autograph/pyct/static_analysis/test_reaching_definitions.py) ---
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for liveness module."""
import unittest
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import cfg
from nvidia.dali._autograph.pyct import naming
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import transformer
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis import liveness
from nvidia.dali._autograph.pyct.static_analysis import reaching_fndefs
# Module-level symbols targeted by the `global` statements in the tests below.
global_a = 7
global_b = 17
class LivenessAnalyzerTestBase(unittest.TestCase):
    """Shared helpers for the liveness analysis tests."""

    def _parse_and_analyze(self, test_fn):
        """Parses test_fn and runs the liveness analysis over it."""
        # TODO(mdan): Use a custom FunctionTransformer here.
        ast_node, source = parser.parse_entity(test_fn, future_features=())
        entity_info = transformer.EntityInfo(
            name=test_fn.__name__,
            source_code=source,
            source_file=None,
            future_features=(),
            namespace={})
        ctx = transformer.Context(entity_info, naming.Namer({}), None)
        ast_node = qual_names.resolve(ast_node)
        ast_node = activity.resolve(ast_node, ctx)
        graphs = cfg.build(ast_node)
        ast_node = reaching_fndefs.resolve(ast_node, ctx, graphs)
        return liveness.resolve(ast_node, ctx, graphs)

    def _normalized_expected(self, expected):
        # Accepts None/empty, a single symbol name, or a tuple of names.
        if not expected:
            return set()
        if isinstance(expected, tuple):
            return set(expected)
        return {expected}

    def assertHasLiveOut(self, node, expected):
        """Checks the set of symbols that are live after node executes."""
        live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
        self.assertSetEqual(
            {str(v) for v in live_out}, self._normalized_expected(expected))

    def assertHasLiveIn(self, node, expected):
        """Checks the set of symbols that are live before node executes."""
        live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
        self.assertSetEqual(
            {str(v) for v in live_in}, self._normalized_expected(expected))
class LivenessAnalyzerTest(LivenessAnalyzerTestBase):
    """Tests for liveness analysis over assorted control flow.

    The nested ``test_fn`` functions are fixtures whose source is parsed and
    analyzed; the assertions index into the resulting AST by statement
    position, so fixture statement order is load-bearing.
    """

    def test_live_out_try_block(self):

        def test_fn(x, a, b, c):  # pylint:disable=unused-argument
            if a > 0:
                try:
                    pass
                except:  # pylint:disable=bare-except # noqa: E722
                    pass
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveOut(fn_body[0], 'x')
        self.assertHasLiveOut(fn_body[0].body[0], 'x')

    def test_live_out_if_inside_except(self):

        def test_fn(x, a, b, c):  # pylint:disable=unused-argument
            if a > 0:
                try:
                    pass
                except:  # pylint:disable=bare-except # noqa: E722
                    if b > 0:
                        x = b
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveOut(fn_body[0], 'x')
        self.assertHasLiveOut(fn_body[0].body[0], 'x')
        self.assertHasLiveOut(fn_body[0].body[0].handlers[0].body[0], 'x')

    def test_live_out_stacked_if(self):

        def test_fn(x, a):
            if a > 0:
                x = 0
            if a > 1:
                x = 1
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # a is still live after the first if (read by the second test).
        self.assertHasLiveOut(fn_body[0], ('a', 'x'))
        self.assertHasLiveOut(fn_body[1], 'x')

    def test_live_out_stacked_if_else(self):

        def test_fn(x, a):
            if a > 0:
                x = 0
            if a > 1:
                x = 1
            else:
                x = 2
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # x is overwritten in both branches of the second if, so only a is
        # live after the first.
        self.assertHasLiveOut(fn_body[0], 'a')
        self.assertHasLiveOut(fn_body[1], 'x')

    def test_live_out_for_basic(self):

        def test_fn(x, a):
            for i in range(a):
                x += i
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveOut(fn_body[0], 'x')

    def test_live_out_for_iterate(self):

        def test_fn(x, a):
            for i in range(a):
                x += i
            return x, i  # pylint:disable=undefined-loop-variable

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # The loop variable is read after the loop, so it's live out too.
        self.assertHasLiveOut(fn_body[0], ('x', 'i'))

    def test_live_out_attributes(self):

        def test_fn(x, a):
            if a > 0:
                x.y = 0
            return x.y

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # Both the composite x.y and its parent x are tracked.
        self.assertHasLiveOut(fn_body[0], ('x.y', 'x'))

    def test_live_out_nested_functions(self):

        def test_fn(a, b):
            if b:
                a = []

            def foo():
                return a

            foo()

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # a is kept live by the closure read inside foo.
        self.assertHasLiveOut(fn_body[0], 'a')

    def test_live_out_nested_functions_defined_ahead(self):

        def test_fn(a, b):

            def foo():
                return a

            if b:
                a = []
            return foo

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveOut(fn_body[1], ('a', 'foo'))

    def test_live_out_nested_functions_defined_after(self):

        def test_fn(a, b):
            if b:
                a = []

            def foo():
                return a

            return foo

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveOut(fn_body[0], ('a',))

    def test_live_out_lambda(self):

        def test_fn(a, b):
            if b:
                a = []
            foo = lambda: a
            if b:
                pass
            return foo

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveOut(fn_body[0], ('a', 'b'))
        self.assertHasLiveOut(fn_body[2], ('foo',))

    def test_live_out_nested_functions_hidden_by_argument(self):

        def test_fn(b):

            def foo(a):
                return a

            if b:
                a = []  # pylint:disable=unused-variable # noqa: F841
            return foo

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # foo's parameter a shadows the outer a, so only foo is live out.
        self.assertHasLiveOut(fn_body[1], ('foo'))

    def test_live_out_nested_functions_isolation(self):

        def test_fn(b):
            if b:
                a = 0  # pylint:disable=unused-variable # noqa: F841

            def child():
                max(a)  # pylint:disable=used-before-assignment # noqa: F823
                a = 1
                return a

            child()

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # The assignment inside child makes its a local, so the outer a is not
        # kept live; max (a free symbol read by child) is.
        self.assertHasLiveOut(fn_body[0], 'max')

    def test_live_out_deletion(self):

        def test_fn(x, y, a):
            for _ in a:
                if x:
                    del y
                else:
                    y = 0  # noqa: F841

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveOut(fn_body[0], ())

    def test_live_in_pass(self):

        def test_fn(x, a, b, c):  # pylint:disable=unused-argument
            if a > 0:
                pass
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('a', 'x'))
        self.assertHasLiveIn(fn_body[0].body[0], ('x',))
        self.assertHasLiveIn(fn_body[1], ('x',))

    def test_live_in_raise(self):

        def test_fn(x, a, b, c):
            if a > 0:
                b = b + 1
                raise c
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'x'))
        self.assertHasLiveIn(fn_body[0].body[0], ('b', 'c'))
        self.assertHasLiveIn(fn_body[1], ('x',))

    def test_live_out_except_variable(self):
        # NOTE(review): despite the "live_out" name, this checks live-in of the
        # try statement — confirm whether a rename is warranted.

        def test_fn(x, a):
            try:
                pass
            except a as b:
                raise b
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # Note: 'a' is not live because there is no raise statement inside the
        # try, and we discount the possibility of other code in the try block
        # raising an error.
        self.assertHasLiveIn(fn_body[0], ('b', 'x'))

    def test_live_in_return_statement(self):

        def test_fn(x, a, b, c):  # pylint:disable=unused-argument
            if a > 0:
                return x
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('a', 'x'))
        self.assertHasLiveIn(fn_body[0].body[0], ('x',))
        self.assertHasLiveIn(fn_body[1], ('x',))

    def test_live_in_try_block(self):

        def test_fn(x, a, b, c):  # pylint:disable=unused-argument
            if a > 0:
                try:
                    pass
                except:  # pylint:disable=bare-except # noqa: E722
                    pass
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('a', 'x'))
        self.assertHasLiveIn(fn_body[0].body[0], ('x',))
        self.assertHasLiveIn(fn_body[1], ('x',))

    def test_live_in_try_orelse(self):

        def test_fn(x, a, b, c):  # pylint:disable=unused-argument
            if a > 0:
                try:
                    pass
                except:  # pylint:disable=bare-except # noqa: E722
                    pass
                else:
                    x = b
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('a', 'b', 'x'))
        self.assertHasLiveIn(fn_body[0].body[0], ('b', 'x'))
        self.assertHasLiveIn(fn_body[1], ('x',))

    def test_live_in_if_inside_except(self):

        def test_fn(x, a, b, c):  # pylint:disable=unused-argument
            if a > 0:
                try:
                    pass
                except:  # pylint:disable=bare-except # noqa: E722
                    if b > 0:
                        x = b
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('a', 'b', 'x'))
        self.assertHasLiveIn(fn_body[0].body[0], ('b', 'x'))
        self.assertHasLiveIn(fn_body[0].body[0].handlers[0].body[0], ('b', 'x'))
        self.assertHasLiveIn(fn_body[1], ('x',))

    def test_live_in_stacked_if(self):

        def test_fn(x, a, b, c):
            if a > 0:
                x = b
            if c > 1:
                x = 0
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'x'))
        self.assertHasLiveIn(fn_body[1], ('c', 'x'))

    def test_live_in_stacked_if_else(self):

        def test_fn(x, a, b, c, d):
            if a > 1:
                x = b
            else:
                x = c
            if d > 0:
                x = 0
            return x

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # x is written in both branches, so it's not live into the first if.
        self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'd'))
        self.assertHasLiveIn(fn_body[1], ('d', 'x'))

    def test_live_in_for_basic(self):

        def test_fn(x, y, a):
            for i in a:
                x = i
                y += x
                z = 0
            return y, z

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # z is live in because the loop may not run, in which case the return
        # reads a prior z.
        self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))

    def test_live_in_for_nested(self):

        def test_fn(x, y, a):
            for i in a:
                for j in i:
                    x = i
                    y += x
                    z = j
            return y, z

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))

    def test_live_in_deletion(self):

        def test_fn(x, y, a):
            for _ in a:
                if x:
                    del y
                else:
                    y = 0  # noqa: F841

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        # y is live in because the del statement is treated as a read.
        self.assertHasLiveIn(fn_body[0], ('a', 'x', 'y'))

    def test_live_in_generator_comprehension(self):

        def test_fn(y):
            if all(x for x in y):
                return

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('all', 'y'))

    def test_live_in_list_comprehension(self):

        def test_fn(y):
            if [x for x in y]:
                return

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('y',))

    def test_live_in_list_comprehension_expression(self):

        def test_fn(y, s):
            s += foo([x for x in y])  # pylint:disable=undefined-variable # noqa: F821

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('y', 'foo', 's'))

    def test_live_in_set_comprehension(self):

        def test_fn(y):
            if {x for x in y}:
                return

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('y',))

    def test_live_in_dict_comprehension(self):

        def test_fn(y):
            if {k: v for k, v in y}:
                return

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveIn(fn_body[0], ('y',))

    def test_global_symbol(self):

        def test_fn(c):
            global global_a
            global global_b
            if global_a:
                global_b = c
            else:
                global_b = c
            return global_b

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveOut(fn_body[2], ('global_b',))
        self.assertHasLiveIn(fn_body[2], ('global_a', 'c'))

    def test_nonlocal_symbol(self):
        # Closure cells targeted by the fixture's nonlocal statements.
        nonlocal_a = 3
        nonlocal_b = 13

        def test_fn(c):
            nonlocal nonlocal_a
            nonlocal nonlocal_b
            if nonlocal_a:
                nonlocal_b = c
            else:
                nonlocal_b = c
            return nonlocal_b

        node = self._parse_and_analyze(test_fn)
        fn_body = node.body

        self.assertHasLiveOut(fn_body[2], ('nonlocal_b',))
        self.assertHasLiveIn(fn_body[2], ('nonlocal_a', 'c'))
# --- end of extracted file (repo: DALI-main,
#     path: dali/test/python/autograph/pyct/static_analysis/test_liveness.py) ---
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for type_inference module."""
import unittest
from typing import Any, Callable, List
from nvidia.dali._autograph.pyct import anno
from nvidia.dali._autograph.pyct import cfg
from nvidia.dali._autograph.pyct import qual_names
from nvidia.dali._autograph.pyct import transpiler
from nvidia.dali._autograph.pyct.static_analysis import activity
from nvidia.dali._autograph.pyct.static_analysis import reaching_definitions
from nvidia.dali._autograph.pyct.static_analysis import reaching_fndefs
from nvidia.dali._autograph.pyct.static_analysis import type_inference
class BasicTestResolver(type_inference.Resolver):
    """A very basic resolver for testing."""

    def res_name(self, ns, types_ns, name):
        """Resolves a plain name: 'int' maps to the int type, else the namespace value."""
        str_name = str(name)
        if str_name == 'int':
            return {int}, int
        value = ns[str_name]
        return {type(value)}, value

    def res_value(self, ns, value):
        # The static type of a value is simply its runtime type.
        return {type(value)}

    def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
        # Unannotated arguments have no known type; otherwise use the
        # annotation's string form as the type.
        return None if type_anno is None else {str(type_anno)}
class TestTranspiler(transpiler.GenericTranspiler):
    """Transpiler that runs the full type-inference pipeline over an entity."""

    def __init__(self, resolver_type):
        super().__init__()
        # The resolver supplies type information during inference.
        self.resolver = resolver_type()

    def get_transformed_name(self, _):
        # The generated name is irrelevant to these tests.
        return 'test_item'

    def transform_ast(self, node, ctx):
        """Runs the prerequisite analyses, then type inference, on node."""
        resolved = qual_names.resolve(node)
        resolved = activity.resolve(resolved, ctx)
        graphs = cfg.build(resolved)
        # Order matters: type inference consumes both analyses' annotations.
        for analysis in (reaching_definitions.resolve, reaching_fndefs.resolve):
            resolved = analysis(resolved, ctx, graphs)
        return type_inference.resolve(resolved, ctx, graphs, self.resolver)
class TypeInferenceAnalyzerTest(unittest.TestCase):
    """Tests for the type_inference static analysis.

    Each test defines a small `test_fn`, pushes it through `TestTranspiler`
    with a scripted `Resolver`, and inspects the `Static.TYPES` /
    `Static.VALUE` / `Static.CLOSURE_TYPES` annotations left on the AST.

    NOTE: the `test_fn` bodies and the `fn_body[...]` index expressions are
    fixtures — their exact shape is what the assertions depend on.
    """

    def assertTypes(self, node, expected):
        """Asserts `node` is annotated with exactly the `expected` set of types."""
        if not isinstance(expected, tuple):
            expected = expected,
        self.assertSetEqual(
            set(anno.getanno(node, anno.Static.TYPES)), set(expected))

    def assertClosureTypes(self, node, expected):
        """Asserts `node`'s closure-type annotation includes every entry of `expected`."""
        actual = anno.getanno(node, anno.Static.CLOSURE_TYPES)
        actual = {str(k): v for k, v in actual.items()}
        for k, v in expected.items():
            self.assertIn(k, actual)
            self.assertEqual(actual[k], v)

    def test_no_inference_on_unknown_operand_types(self):
        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return None

        def test_fn(a, b):
            return a < b, a - b

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        # With no information on operand types, the operators will infer nothing.
        self.assertFalse(
            anno.hasanno(fn_body[0].value.elts[0], anno.Static.TYPES))
        self.assertFalse(
            anno.hasanno(fn_body[0].value.elts[1], anno.Static.TYPES))

    def test_resolver_output_checked(self):
        # Resolvers must return sets; a bare value is rejected.
        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return 1

        def test_fn(a):
            del a
            pass

        with self.assertRaisesRegex(ValueError, 'expected to return set'):
            TestTranspiler(Resolver).transform(test_fn, None)

    def test_argument(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                test_self.assertFalse(f_is_local)
                if name == qual_names.QN('a'):
                    test_self.assertEqual(type_anno, qual_names.QN('int'))
                return {str(name) + '_type'}

        def test_fn(a: int, b):
            return a, b

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value.elts[0], 'a_type')
        self.assertTypes(fn_body[0].value.elts[1], 'b_type')

    def test_argument_of_local_function(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                if f_name == 'test_fn':
                    test_self.assertFalse(f_is_local)
                    test_self.assertEqual(name, qual_names.QN('a'))
                    test_self.assertEqual(type_anno, qual_names.QN('int'))
                elif f_name == 'foo':
                    test_self.assertTrue(f_is_local)
                    if name == qual_names.QN('x'):
                        test_self.assertEqual(type_anno, qual_names.QN('float'))
                    elif name == qual_names.QN('y'):
                        test_self.assertIsNone(type_anno)
                    else:
                        test_self.fail('unexpected argument {} for {}'.format(name, f_name))
                else:
                    test_self.fail('unexpected function name {}'.format(f_name))
                return {str(name) + '_type'}

        def test_fn(a: int):
            def foo(x: float, y):
                return x, y
            return foo(a, a)

        tr = TestTranspiler(Resolver)
        node, _ = tr.transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].body[0].value, (('x_type', 'y_type'),))
        self.assertTypes(fn_body[0].body[0].value.elts[0], 'x_type')
        self.assertTypes(fn_body[0].body[0].value.elts[1], 'y_type')

    def test_assign_straightline(self):
        def test_fn(a: int, c: float):
            b = a
            return a, b, c

        node, _ = TestTranspiler(BasicTestResolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].targets[0], 'int')
        self.assertTypes(fn_body[0].value, 'int')
        self.assertTypes(fn_body[1].value.elts[0], 'int')
        self.assertTypes(fn_body[1].value.elts[1], 'int')
        self.assertTypes(fn_body[1].value.elts[2], 'float')

    def test_expr(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_value(self, ns, value):
                test_self.assertEqual(value, tc.a)
                return {str}

            def res_name(self, ns, types_ns, name):
                test_self.assertEqual(name, qual_names.QN('tc'))
                return {TestClass}, tc

            def res_call(self, ns, types_ns, node, f_type, args, keywords):
                test_self.assertEqual(f_type, (str,))
                return {int}, None

        class TestClass:
            def a(self):
                pass

        tc = TestClass()

        def test_fn():
            tc.a()

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertEqual(
            anno.getanno(fn_body[0].value.func, anno.Static.VALUE), tc.a)
        self.assertTypes(fn_body[0].value.func, str)
        self.assertTypes(fn_body[0].value, int)
        self.assertTypes(fn_body[0], int)

    def test_assign_overwriting(self):
        def test_fn(a: int, b: float):
            c = a
            c = b
            return c

        node, _ = TestTranspiler(BasicTestResolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].targets[0], 'int')
        self.assertTypes(fn_body[0].value, 'int')
        self.assertTypes(fn_body[1].targets[0], 'float')
        self.assertTypes(fn_body[1].value, 'float')

    def test_dynamic_attribute_of_static_value(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_value(self, ns, value):
                test_self.assertEqual(value, tc.a)
                return {int}

            def res_name(self, ns, types_ns, name):
                test_self.assertEqual(name, qual_names.QN('tc'))
                return {TestClass}, tc

        class TestClass:
            def __init__(self):
                self.a = 1

        tc = TestClass()

        def test_fn():
            return tc.a

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value.value, TestClass)
        self.assertTypes(fn_body[0].value, int)
        self.assertIs(anno.getanno(fn_body[0].value.value, anno.Static.VALUE), tc)
        self.assertEqual(anno.getanno(fn_body[0].value, anno.Static.VALUE), tc.a)

    def test_static_attribute_of_typed_value(self):
        test_self = self

        class TestClass:
            a = 1

        tc = TestClass()

        class Resolver(type_inference.Resolver):
            def res_name(self, ns, types_ns, name):
                test_self.assertEqual(name, qual_names.QN('tc'))
                return {TestClass}, None

            def res_value(self, ns, value):
                test_self.assertIs(value, tc.a)
                return {str}

        def test_fn():
            return tc.a

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value.value, TestClass)
        self.assertTypes(fn_body[0].value, str)  # Resolver is SOT
        self.assertFalse(anno.hasanno(fn_body[0].value.value, anno.Static.VALUE))
        self.assertEqual(anno.getanno(fn_body[0].value, anno.Static.VALUE), 1)

    def test_static_attribute_of_ambiguous_type(self):
        test_self = self

        class TestClass1:
            a = 1

        class TestClass2:
            a = 2

        tc = TestClass1()

        class Resolver(type_inference.Resolver):
            def res_name(self, ns, types_ns, name):
                test_self.assertEqual(name, qual_names.QN('tc'))
                return {TestClass1, TestClass2}, None

            def res_value(self, ns, value):
                test_self.assertIn(value, (1, 2))
                return {str}

        def test_fn():
            return tc.a

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value.value, (TestClass1, TestClass2))
        # Ambiguity prevents any attribute type/value from being pinned down.
        self.assertFalse(anno.hasanno(fn_body[0].value, anno.Static.TYPES))
        self.assertFalse(anno.hasanno(fn_body[0].value.value, anno.Static.VALUE))
        self.assertFalse(anno.hasanno(fn_body[0].value, anno.Static.VALUE))

    def test_property_of_typed_value(self):
        test_self = self

        class TestClass:
            @property
            def a(self):
                return 1

        tc = TestClass()

        class Resolver(type_inference.Resolver):
            def res_name(self, ns, types_ns, name):
                test_self.assertEqual(name, qual_names.QN('tc'))
                return {TestClass}, None

            def res_value(self, ns, value):
                test_self.assertIs(value, TestClass.a)
                test_self.assertNotEqual(value, 1)  # Can't evaluate property of class.
                return {property}

        def test_fn():
            return tc.a

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value.value, TestClass)
        self.assertTypes(fn_body[0].value, property)
        self.assertFalse(anno.hasanno(fn_body[0].value.value, anno.Static.VALUE))
        self.assertEqual(
            anno.getanno(fn_body[0].value, anno.Static.VALUE), TestClass.a)

    def test_dynamic_attribute_of_typed_value(self):
        test_self = self

        class TestClass:
            def __init__(self):
                self.a = 1

        tc = TestClass()

        class Resolver(type_inference.Resolver):
            def res_name(self, ns, types_ns, name):
                test_self.assertEqual(name, qual_names.QN('tc'))
                return {TestClass}, None

        def test_fn():
            return tc.a

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value.value, TestClass)
        # Instance attributes can't be resolved from the class alone.
        self.assertFalse(anno.hasanno(fn_body[0].value, anno.Static.TYPES))
        self.assertFalse(anno.hasanno(fn_body[0].value.value, anno.Static.VALUE))
        self.assertFalse(anno.hasanno(fn_body[0].value, anno.Static.VALUE))

    def test_external_value(self):
        a = 'foo'

        def test_fn():
            b = a
            return b

        node, _ = TestTranspiler(BasicTestResolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].targets[0], str)
        self.assertTypes(fn_body[1].value, str)

    def test_external_function(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_name(self, ns, types_ns, name):
                test_self.assertEqual(name, qual_names.QN('g'))
                return {str}, g

            def res_call(self, ns, types_ns, node, f_type, args, keywords):
                test_self.assertEqual(f_type, (str,))
                test_self.assertEqual(
                    anno.getanno(node.func, anno.Basic.QN), qual_names.QN('g'))
                return {float}, None

        def g() -> float:
            return 1.0

        def test_fn():
            a = g()
            return a

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value.func, str)
        self.assertTypes(fn_body[0].targets[0], float)
        self.assertTypes(fn_body[1].value, float)

    def test_external_function_side_effects(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_name(self, ns, types_ns, name):
                test_self.assertEqual(name, qual_names.QN('g'))
                return None, g

            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return {str(type_anno)}

            def res_call(self, ns, types_ns, node, f_type, args, keywords):
                test_self.assertIsNone(f_type)
                # Report that the call mutates `x` to type str.
                return None, {qual_names.QN('x'): {str}}

        def g():
            # The resolver will pretend that this function has the following body:
            #
            #   nonlocal x
            #   x = 'a'
            pass

        def test_fn(x: int):
            y = x
            g()
            return x, y

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].targets[0], 'int')
        self.assertTypes(fn_body[0].value, 'int')
        # After g(), x's type reflects the reported side effect; y is unchanged.
        self.assertTypes(fn_body[2].value.elts[0], str)
        self.assertTypes(fn_body[2].value.elts[1], 'int')

    def test_local_function_closure(self):
        def test_fn(x: int):
            def foo():
                return x
            foo()

        node, _ = TestTranspiler(BasicTestResolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].body[0].value, 'int')
        self.assertClosureTypes(fn_body[0], {'x': {'int'}})

    def test_local_function_closure_nested(self):
        def test_fn(x: int):
            def foo():
                def bar():
                    return x
                bar()
            foo()

        node, _ = TestTranspiler(BasicTestResolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].body[0].body[0].value, 'int')
        self.assertClosureTypes(fn_body[0], {'x': {'int'}})
        self.assertClosureTypes(fn_body[0].body[0], {'x': {'int'}})

    def test_local_function_closure_mutable_var(self):
        def test_fn(x: int):
            def foo():
                nonlocal x
                return x
            foo()

        node, _ = TestTranspiler(BasicTestResolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].body[1].value, 'int')
        self.assertClosureTypes(fn_body[0], {'x': {'int'}})

    def test_local_function_closure_ignored_for_bound_symbols(self):
        def test_fn(x: float):  # pylint:disable=unused-argument
            def foo():
                x = x + 1  # pylint:disable=used-before-assignment # noqa: F823,F841
            foo()

        node, _ = TestTranspiler(BasicTestResolver).transform(test_fn, None)
        fn_body = node.body
        # `x` is local to foo, so the outer float type must not leak in.
        self.assertFalse(
            anno.hasanno(fn_body[0].body[0].value.left, anno.Static.TYPES))
        self.assertClosureTypes(fn_body[0], {'x': {'float'}})

    def test_local_function_closure_uses_call_site_types(self):
        def test_fn(x: int):
            def foo():
                return x
            x = 1.0
            foo()

        node, _ = TestTranspiler(BasicTestResolver).transform(test_fn, None)
        fn_body = node.body
        # The closure sees x's type at the call site (float), not the annotation.
        self.assertTypes(fn_body[0].body[0].value, float)
        self.assertTypes(fn_body[1].targets[0], float)
        self.assertClosureTypes(fn_body[0], {'x': {float}})

    def test_local_function_hides_locals(self):
        def test_fn(a: int):  # pylint:disable=unused-argument
            def local_fn(v):
                a = v
                return a
            local_fn(1)

        node, _ = TestTranspiler(BasicTestResolver).transform(test_fn, None)
        fn_body = node.body
        self.assertFalse(
            anno.hasanno(fn_body[0].body[0].targets[0], anno.Static.TYPES))

    def test_local_function_type(self):
        def test_fn(x: int):
            def foo() -> int:
                return x
            foo()

        node, _ = TestTranspiler(BasicTestResolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[1].value.func, Callable[[Any], int])
        self.assertTypes(fn_body[1].value, int)
        self.assertTypes(fn_body[1], int)

    def test_side_effects_on_arg_function_closure(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_name(self, ns, types_ns, name):
                test_self.assertEqual(name, qual_names.QN('g'))
                return {Callable[[Callable], None]}, g

            def res_value(self, ns, value):
                test_self.assertEqual(value, 1.0)
                return {float}

            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return {str(type_anno)}

            def res_call(self, ns, types_ns, node, f_type, args, keywords):
                test_self.assertEqual(node.func.id, 'g')
                test_self.assertEqual(f_type, (Callable[[Callable], None],))
                return None, {qual_names.QN('x'): {str}}

        def g(foo):
            # The resolver will convey that this function has the following body:
            #
            #   nonlocal x
            #   x = 'a'
            #   foo()
            del foo
            pass

        def test_fn(x: int):  # pylint:disable=unused-argument
            def foo():
                return x
            x = 1.0
            g(foo)

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].body[0].value, str)

    def test_subscript(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return {list}

            def res_value(self, ns, value):
                return {int}

            def res_slice(self, ns, types_ns, node, value, slice_):
                test_self.assertSetEqual(value, {list})
                test_self.assertSetEqual(slice_, {int})
                return {str}

        def test_fn(a):
            return a[1]

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value, str)
        self.assertTypes(fn_body[0].value.value, list)
        self.assertTypes(fn_body[0].value.slice, int)

    def test_tuple_unpacking(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return {list}

            def res_value(self, ns, value):
                return {int}

            def res_slice(self, ns, types_ns, node_or_slice, value, slice_):
                # Unpacking is modeled as implicit subscripts 0 and 1.
                test_self.assertIn(node_or_slice, (0, 1))
                test_self.assertSetEqual(value, {list})
                test_self.assertSetEqual(slice_, {int})
                if node_or_slice == 0:
                    return {float}
                else:
                    return {str}

        def test_fn(t):
            a, b = t
            return a, b

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[1].value, ((float, str),))
        self.assertTypes(fn_body[1].value.elts[0], float)
        self.assertTypes(fn_body[1].value.elts[1], str)

    def test_compare(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return {int}

            def res_compare(self, ns, types_ns, node, left, right):
                test_self.assertSetEqual(left, {int})
                test_self.assertListEqual(right, [{int}])
                return {bool}

        def test_fn(a, b):
            return a < b

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value, bool)
        self.assertTypes(fn_body[0].value.left, int)
        self.assertTypes(fn_body[0].value.comparators[0], int)

    def test_binop(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return {list}

            def res_binop(self, ns, types_ns, node, left, right):
                test_self.assertSetEqual(left, {list})
                test_self.assertSetEqual(right, {list})
                return {float}

        def test_fn(a, b):
            return a @ b

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value, float)
        self.assertTypes(fn_body[0].value.left, list)
        self.assertTypes(fn_body[0].value.right, list)

    def test_unop(self):
        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return {list}

            def res_unop(self, ns, types_ns, node, opnd):
                return {float}

        def test_fn(a):
            return -a

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value, float)
        self.assertTypes(fn_body[0].value.operand, list)

    def test_tuple_literal(self):
        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return {int}

        def test_fn(a, b):
            return a, b

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value, ((int, int),))
        self.assertTypes(fn_body[0].value.elts[0], int)
        self.assertTypes(fn_body[0].value.elts[1], int)

    def test_list_literal(self):
        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return {int}

            def res_list_literal(self, ns, elt_types):
                all_types = set()
                for s in elt_types:
                    all_types |= s
                return {List[t] for t in all_types}

        def test_fn(a, b):
            return [a, b]

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[0].value, List[int])
        self.assertTypes(fn_body[0].value.elts[0], int)
        self.assertTypes(fn_body[0].value.elts[1], int)

    def test_tuple_unpacking_syntactic(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                if name == qual_names.QN('a'):
                    return {int}
                else:
                    return {float}

            def res_value(self, ns, value):
                test_self.assertIn(value, (0, 1))
                return int

            def res_slice(self, ns, types_ns, node_or_slice, value, slice_):
                test_self.assertIn(node_or_slice, (0, 1))
                test_self.assertSetEqual(value, {(int, float)})
                test_self.assertEqual(slice_, int)
                return {t[node_or_slice] for t in value}

        def test_fn(a, b):
            c, d = a, b
            return c, d

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[1].value, ((int, float),))
        self.assertTypes(fn_body[1].value.elts[0], int)
        self.assertTypes(fn_body[1].value.elts[1], float)

    def test_tuple_unpacking_operational(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                return {(int, float)}

            def res_value(self, ns, value):
                test_self.assertIn(value, (0, 1))
                return int

            def res_slice(self, ns, types_ns, node_or_slice, value, slice_):
                test_self.assertIn(node_or_slice, (0, 1))
                test_self.assertSetEqual(value, {(int, float)})
                test_self.assertEqual(slice_, int)
                return {t[node_or_slice] for t in value}

        def test_fn(a):
            c, d = a
            return c, d

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        self.assertTypes(fn_body[1].value, ((int, float),))
        self.assertTypes(fn_body[1].value.elts[0], int)
        self.assertTypes(fn_body[1].value.elts[1], float)

    def test_list_expansion_syntactic(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                if name == qual_names.QN('a'):
                    return {int}
                else:
                    return {float}

            def res_value(self, ns, value):
                test_self.assertIn(value, (0, 1))
                return int

            def res_slice(self, ns, types_ns, node_or_slice, value, slice_):
                test_self.assertIn(node_or_slice, (0, 1))
                test_self.assertSetEqual(value, {(int, float)})
                test_self.assertEqual(slice_, int)
                return {t[node_or_slice] for t in value}

        def test_fn(a, b):
            [c, d] = a, b
            return c, d

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        # TODO(mdan): Whether it's List or Tuple might be open for interpretation.
        self.assertTypes(fn_body[1].value, ((int, float),))
        self.assertTypes(fn_body[1].value.elts[0], int)
        self.assertTypes(fn_body[1].value.elts[1], float)

    def test_list_expansion_operational(self):
        test_self = self

        class Resolver(type_inference.Resolver):
            def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):
                if name == qual_names.QN('a'):
                    return {int}
                else:
                    return {float}

            def res_value(self, ns, value):
                test_self.assertIn(value, (0, 1))
                return int

            def res_slice(self, ns, types_ns, node_or_slice, value, slice_):
                test_self.assertIn(node_or_slice, (0, 1))
                test_self.assertSetEqual(value, {(int, float)})
                test_self.assertEqual(slice_, int)
                return {t[node_or_slice] for t in value}

        def test_fn(a, b):
            [c, d] = a, b
            return c, d

        node, _ = TestTranspiler(Resolver).transform(test_fn, None)
        fn_body = node.body
        # TODO(mdan): Whether it's List or Tuple might be open for interpretation.
        self.assertTypes(fn_body[1].value, ((int, float),))
        self.assertTypes(fn_body[1].value.elts[0], int)
        self.assertTypes(fn_body[1].value.elts[1], float)
|
DALI-main
|
dali/test/python/autograph/pyct/static_analysis/test_type_inference.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
DALI-main
|
dali/test/python/autograph/pyct/common_transformers/__init__.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anf module."""
import textwrap
import unittest
import gast
from nvidia.dali._autograph.pyct import loader
from nvidia.dali._autograph.pyct import parser
from nvidia.dali._autograph.pyct import transformer
from nvidia.dali._autograph.pyct.common_transformers import anf
# TODO(mdan): These two functions no longer need to be at the top level.
# TODO(mdan): Don't use exec.
def _exec_test_function():
    """Fixture whose body is only parsed, never run, by the ANF tests."""
    # The point is to test A-normal form conversion of exec
    # pylint: disable=exec-used
    # NOTE: 'computed' + 5 would raise TypeError if executed, but the ANF test
    # only parses this body and compares ASTs, so it never runs.
    exec('computed' + 5 + 'stuff', globals(), locals())
def exec_expected_result():
    """Expected ANF form of `_exec_test_function` (parsed only, never run)."""
    # pylint: disable=exec-used
    tmp_1001 = 'computed' + 5
    tmp_1002 = tmp_1001 + 'stuff'
    tmp_1003 = globals()
    tmp_1004 = locals()
    exec(tmp_1002, tmp_1003, tmp_1004)
class AnfTestBase(unittest.TestCase):
    """Shared machinery for checking ANF transformation results."""

    def _simple_context(self):
        # Minimal transformer context; no source is attached since the tests
        # supply entities directly.
        entity_info = transformer.EntityInfo(
            name='test_fn',
            source_code=None,
            source_file=None,
            future_features=(),
            namespace=None)
        return transformer.Context(entity_info, None, None)

    def assert_same_ast(self, expected_node, node, msg=None):
        # Compare by unparsed, dedented source text rather than node identity.
        expected_source = parser.unparse(expected_node, indentation=' ')
        expected_str = textwrap.dedent(expected_source).strip()
        got_source = parser.unparse(node, indentation=' ')
        got_str = textwrap.dedent(got_source).strip()
        self.assertEqual(expected_str, got_str, msg=msg)

    def assert_body_anfs_as_expected(self, expected_fn, test_fn, config=None):
        # Testing the code bodies only. Wrapping them in functions so the
        # syntax highlights nicely, but Python doesn't try to execute the
        # statements.
        exp_node, _ = parser.parse_entity(expected_fn, future_features=())
        node, _ = parser.parse_entity(test_fn, future_features=())
        node = anf.transform(node, self._simple_context(), config=config)
        exp_name = exp_node.name
        # Ignoring the function names in the result because they can't be
        # the same (because both functions have to exist in the same scope
        # at the same time).
        node.name = exp_name
        self.assert_same_ast(exp_node, node)
        # Check that ANF is idempotent
        node_repeated = anf.transform(node, self._simple_context())
        self.assert_same_ast(node_repeated, node)
class AnfTransformerTest(AnfTestBase):
    """Tests the default ANF transformation against hand-written expectations.

    NOTE: every `test_function`/`expected_result` pair is fixture source text;
    the assertion compares their transformed/unparsed ASTs, so the exact
    tokens (including tmp_100x names) matter.
    """

    def test_basic(self):
        def test_function():
            a = 0
            return a

        node, _ = parser.parse_entity(test_function, future_features=())
        node = anf.transform(node, self._simple_context())
        result, _, _ = loader.load_ast(node)
        # The transformed code must still compute the same value.
        self.assertEqual(test_function(), result.test_function())

    def test_binop_basic(self):
        def test_function(x, y, z):
            a = x + y + z
            return a

        def expected_result(x, y, z):
            tmp_1001 = x + y
            a = tmp_1001 + z
            return a

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_if_basic(self):
        def test_function(a, b, c, e, f, g):
            if a + b + c:
                d = e + f + g
                return d

        def expected_result(a, b, c, e, f, g):
            tmp_1001 = a + b
            tmp_1002 = tmp_1001 + c
            if tmp_1002:
                tmp_1003 = e + f
                d = tmp_1003 + g
                return d

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_nested_binop_and_return(self):
        def test_function(b, c, d, e):
            return (2 * b + c) + (d + e)

        def expected_result(b, c, d, e):
            tmp_1001 = 2 * b
            tmp_1002 = tmp_1001 + c
            tmp_1003 = d + e
            tmp_1004 = tmp_1002 + tmp_1003
            return tmp_1004

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_function_call_and_expr(self):
        def test_function(call_something, a, b, y, z, c, d, e, f, g, h, i):
            call_something(a + b, y * z, kwarg=c + d, *(e + f), **(g + h + i))

        def expected_result(call_something, a, b, y, z, c, d, e, f, g, h, i):
            tmp_1001 = g + h
            tmp_1002 = a + b
            tmp_1003 = y * z
            tmp_1004 = e + f
            tmp_1005 = c + d
            tmp_1006 = tmp_1001 + i
            call_something(tmp_1002, tmp_1003, kwarg=tmp_1005, *tmp_1004, **tmp_1006)

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_with_and_print(self):
        def test_function(a, b, c):
            with a + b + c as d:
                print(2 * d + 1)

        def expected_result(a, b, c):
            tmp_1001 = a + b
            tmp_1002 = tmp_1001 + c
            with tmp_1002 as d:
                tmp_1003 = 2 * d
                tmp_1004 = tmp_1003 + 1
                print(tmp_1004)

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_nested_multi_value_assign(self):
        def test_function(a, b, c):
            x, y = a, a + b
            (z, y), x = (c, y + b), x + a
            return z, (y, x)

        def expected_result(a, b, c):
            tmp_1001 = a + b
            x, y = a, tmp_1001
            tmp_1002 = y + b
            tmp_1003 = (c, tmp_1002)
            tmp_1004 = x + a
            (z, y), x = tmp_1003, tmp_1004
            tmp_1005 = y, x
            tmp_1006 = z, tmp_1005
            return tmp_1006

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_deeply_nested_multi_value_assign(self):
        def test_function(a):
            [([(b, c), [d, e]], (f, g)), [(h, i, j), k]] = a
            return [([(b, c), [d, e]], (f, g)), [(h, i, j), k]]

        def expected_result(a):
            [([(b, c), [d, e]], (f, g)), [(h, i, j), k]] = a
            tmp_1001 = b, c
            tmp_1002 = [d, e]
            tmp_1003 = [tmp_1001, tmp_1002]
            tmp_1004 = f, g
            tmp_1005 = h, i, j
            tmp_1006 = tmp_1003, tmp_1004
            tmp_1007 = [tmp_1005, k]
            tmp_1008 = [tmp_1006, tmp_1007]
            return tmp_1008

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_local_definition_and_binary_compare(self):
        def test_function():
            def foo(a, b):
                return 2 * a < b
            return foo

        def expected_result():
            def foo(a, b):
                tmp_1001 = 2 * a
                tmp_1002 = tmp_1001 < b
                return tmp_1002
            return foo

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_list_literal(self):
        def test_function(a, b, c, d, e, f):
            return [a + b, c + d, e + f]

        def expected_result(a, b, c, d, e, f):
            tmp_1001 = a + b
            tmp_1002 = c + d
            tmp_1003 = e + f
            tmp_1004 = [tmp_1001, tmp_1002, tmp_1003]
            return tmp_1004

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_tuple_literal_and_unary(self):
        def test_function(a, b, c, d, e, f):
            return (a + b, -(c + d), e + f)

        def expected_result(a, b, c, d, e, f):
            tmp_1001 = c + d
            tmp_1002 = a + b
            tmp_1003 = -tmp_1001
            tmp_1004 = e + f
            tmp_1005 = (tmp_1002, tmp_1003, tmp_1004)
            return tmp_1005

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_set_literal(self):
        def test_function(a, b, c, d, e, f):
            return set(a + b, c + d, e + f)

        def expected_result(a, b, c, d, e, f):
            tmp_1001 = a + b
            tmp_1002 = c + d
            tmp_1003 = e + f
            tmp_1004 = set(tmp_1001, tmp_1002, tmp_1003)
            return tmp_1004

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_dict_literal_and_repr(self):
        def test_function(foo, bar, baz):
            return repr({foo + bar + baz: 7 | 8})

        def expected_result(foo, bar, baz):
            tmp_1001 = foo + bar
            tmp_1002 = tmp_1001 + baz
            tmp_1003 = 7 | 8
            tmp_1004 = {tmp_1002: tmp_1003}
            tmp_1005 = repr(tmp_1004)
            return tmp_1005

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_field_read_and_write(self):
        def test_function(a, d):
            a.b.c = d.e.f + 3

        def expected_result(a, d):
            tmp_1001 = a.b
            tmp_1002 = d.e
            tmp_1003 = tmp_1002.f
            tmp_1001.c = tmp_1003 + 3

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_subscript_read_and_write(self):
        def test_function(a, b, c, d, e, f):
            a[b][c] = d[e][f] + 3

        def expected_result(a, b, c, d, e, f):
            tmp_1001 = a[b]
            tmp_1002 = d[e]
            tmp_1003 = tmp_1002[f]
            tmp_1001[c] = tmp_1003 + 3

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_augassign_and_delete(self):
        def test_function(a, x, y, z):
            a += x + y + z
            del a
            del z[y][x]

        def expected_result(a, x, y, z):
            tmp_1001 = x + y
            a += tmp_1001 + z
            del a
            tmp_1002 = z[y]
            del tmp_1002[x]

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_raise_yield_and_raise(self):
        def test_function(a, c, some_computed, exception):
            yield a ** c
            raise some_computed('complicated' + exception)

        def expected_result(a, c, some_computed, exception):
            tmp_1001 = a ** c
            yield tmp_1001
            tmp_1002 = 'complicated' + exception
            tmp_1003 = some_computed(tmp_1002)
            raise tmp_1003

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_with_and_if_with_expressions(self):
        def test_function(foo, bar, function, quux, quozzle, w, x, y, z):
            with foo + bar:
                function(x + y)
            if quux + quozzle:
                function(z / w)

        def expected_result(foo, bar, function, quux, quozzle, w, x, y, z):
            tmp_1001 = foo + bar
            with tmp_1001:
                tmp_1002 = x + y
                function(tmp_1002)
            tmp_1003 = quux + quozzle
            if tmp_1003:
                tmp_1004 = z / w
                function(tmp_1004)

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_exec(self):
        self.assert_body_anfs_as_expected(exec_expected_result, _exec_test_function)

    def test_simple_while_and_assert(self):
        def test_function(foo, quux):
            while foo:
                assert quux
                foo = foo + 1 * 3

        def expected_result(foo, quux):
            while foo:
                assert quux
                tmp_1001 = 1 * 3
                foo = foo + tmp_1001

        self.assert_body_anfs_as_expected(expected_result, test_function)

    def test_for(self):
        def test_function(compute, something, complicated, foo):
            for foo in compute(something + complicated):
                bar = foo + 1 * 3
            return bar

        def expected_result(compute, something, complicated, foo):
            tmp_1001 = something + complicated
            tmp_1002 = compute(tmp_1001)
            for foo in tmp_1002:
                tmp_1003 = 1 * 3
                bar = foo + tmp_1003
            return bar

        self.assert_body_anfs_as_expected(expected_result, test_function)

    # This test collects several examples where the definition of A-normal form
    # implemented by this transformer is questionable. Mostly it's here to spell
    # out what the definition is in these cases.
    def test_controversial(self):
        def test_function(b, c, d, f):
            a = c + d
            a.b = c + d
            a[b] = c + d
            a += c + d
            a, b = c
            a, b = c, d
            a = f(c)
            a = f(c + d)
            a[b + d] = f.e(c + d)

        def expected_result(b, c, d, f):
            a = c + d
            a.b = c + d  # Should be a.b = tmp? (Definitely not tmp = c + d)
            a[b] = c + d  # Should be a[b] = tmp? (Definitely not tmp = c + d)
            a += c + d  # Should be a += tmp? (Definitely not tmp = c + d)
            a, b = c  # Should be a = c[0], b = c[1]? Or not?
            a, b = c, d  # Should be a = c, b = d? Or not?
            a = f(c)
            tmp_1001 = c + d
            a = f(tmp_1001)
            tmp_1002 = b + d
            tmp_1003 = f.e
            tmp_1004 = c + d
            a[tmp_1002] = tmp_1003(tmp_1004)  # Or should be a[tmp1] = tmp2?

        self.assert_body_anfs_as_expected(expected_result, test_function)
class AnfNonTransformationTest(AnfTransformerTest):
    """Test that specifying "no transformation" does nothing.

    Reuses all the examples of AnfTransformerTest by overriding
    `assert_body_anfs_as_expected`.
    """

    def assert_body_anfs_as_expected(self, expected_fn, test_fn):
        # Testing the code bodies only. Wrapping them in functions so the
        # syntax highlights nicely, but Python doesn't try to execute the
        # statements.
        node, _ = parser.parse_entity(test_fn, future_features=())
        orig_source = parser.unparse(node, indentation=' ')
        orig_str = textwrap.dedent(orig_source).strip()
        config = [(anf.ANY, anf.LEAVE)]  # Configuration to transform nothing
        node = anf.transform(node, self._simple_context(), config=config)
        new_source = parser.unparse(node, indentation=' ')
        new_str = textwrap.dedent(new_source).strip()
        # With the LEAVE-everything config, the source must be unchanged.
        self.assertEqual(orig_str, new_str)
class AnfConfiguredTest(AnfTestBase):
  """Tests of non-default ANF configurations, passed explicitly as `config`."""
  def test_constants_in_function_calls(self):
    # An example specific configuration that differs from the default: Moving
    # literals out of being directly passed to functions, but nothing else.
    try:
      # TODO(b/140808434): Fix this.
      # gast pre-0.3
      literals = (gast.Num, gast.Str, gast.Bytes, gast.NameConstant, gast.Name)
    except AttributeError:
      # gast 0.3+
      literals = (gast.Constant, gast.Name)
    config = [(anf.ASTEdgePattern(gast.Call, anf.ANY, literals), anf.REPLACE)]
    def test_function(x, frob):
      return frob(x, x+1, 2)
    def expected_result(x, frob):
      # Only the literal `2` is hoisted; `x+1` is not a literal and stays.
      tmp_1001 = 2
      return frob(x, x+1, tmp_1001)
    self.assert_body_anfs_as_expected(expected_result, test_function, config)
  def test_anf_some_function_calls(self):
    # Another example specific configuration that differs from the default:
    # Moving all arguments out of some function calls but leaving others be.
    allowlist = ['foo']
    def transform(parent, field, child):
      # Predicate used as a config action: hoist only for calls whose callee
      # name is in the allowlist.
      del field
      del child
      func_name = parent.func.id
      return str(func_name) in allowlist
    config = [(anf.ASTEdgePattern(gast.Call, anf.ANY, anf.ANY), transform)]
    def test_function(x, foo, bar):
      y = foo(x, x+1, 2)
      return bar(y, y+1, 2)
    def expected_result(x, foo, bar):
      # `foo` is in the allowlist, so its arguments are hoisted; `bar` is not.
      tmp_1001 = x+1
      tmp_1002 = 2
      y = foo(x, tmp_1001, tmp_1002)
      return bar(y, y+1, 2)
    self.assert_body_anfs_as_expected(expected_result, test_function, config)
  def test_touching_name_constant(self):
    # Checking that the nodes for `True`, `False`, and `None` can be manipulated
    # by a configuration. This is non-trivial, because in Python 2 those are
    # represented as `Name`, which is the same node type as variable references.
    specials = (gast.Name, gast.Constant)
    config = [(anf.ASTEdgePattern(gast.Call, anf.ANY, specials), anf.REPLACE)]
    def test_function(f):
      return f(True, False, None)
    def expected_result(f):
      tmp_1001 = True
      tmp_1002 = False
      tmp_1003 = None
      return f(tmp_1001, tmp_1002, tmp_1003)
    self.assert_body_anfs_as_expected(expected_result, test_function, config)
|
DALI-main
|
dali/test/python/autograph/pyct/common_transformers/test_anf.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import nvidia.dali as dali
import os
import tempfile
from glob import glob
from nose.tools import assert_equal
import webdataset_base as base
from nose_utils import assert_raises
from test_utils import compare_pipelines, get_dali_extra_path
def general_corner_case(
    test_batch_size=base.test_batch_size, dtypes=None, missing_component_behavior="", **kwargs
):
    """Compare the webdataset reader against a plain file reader on MNIST devel-0.

    Extra ``kwargs`` are forwarded to both pipelines, so reader options such as
    ``lazy_init`` or ``read_ahead`` can be exercised by the callers below.
    """
    num_samples = 1000
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
    index_file = base.generate_temp_index_file(tar_file_path)
    extract_dir = base.generate_temp_extract(tar_file_path)
    # Sort the extracted files numerically by basename so they line up with the
    # sample order inside the tar archive.
    equivalent_files = sorted(
        glob(extract_dir.name + "/*"), key=lambda s: int(s[s.rfind("/") + 1: s.rfind(".")])
    )
    compare_pipelines(
        base.webdataset_raw_pipeline(
            tar_file_path,
            index_file.name,
            ["jpg", "cls"],
            missing_component_behavior=missing_component_behavior,
            dtypes=dtypes,
            batch_size=test_batch_size,
            device_id=0,
            num_threads=1,
            **kwargs
        ),
        base.file_reader_pipeline(
            equivalent_files,
            ["jpg", "cls"],
            batch_size=test_batch_size,
            device_id=0,
            num_threads=1,
            **kwargs
        ),
        test_batch_size,
        math.ceil(num_samples / test_batch_size),
    )
def test_mmap_dtype_incompatibility():
    # Requesting dtypes that don't match the stored component sizes must be
    # rejected with a descriptive error.
    assert_raises(
        RuntimeError,
        general_corner_case,
        dtypes=[dali.types.INT8, dali.types.FLOAT64],
        glob="component size and dtype incompatible",
    )
def test_lazy_init():
    # The reader must behave identically with deferred initialization.
    general_corner_case(lazy_init=True)
def test_read_ahead():
    # The reader must behave identically with read-ahead enabled.
    general_corner_case(read_ahead=True)
def test_single_sample():
    """An archive containing exactly one sample is read and reported correctly."""
    test_batch_size = 1
    num_samples = 1
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/sample-tar/single.tar")
    index_file = base.generate_temp_index_file(tar_file_path)
    extract_dir = base.generate_temp_extract(tar_file_path)
    equivalent_files = list(sorted(glob(extract_dir.name + "/*")))
    # Run several epochs (factor 10) to also cover the wraparound behavior.
    compare_pipelines(
        base.webdataset_raw_pipeline(
            tar_file_path,
            index_file.name,
            ["txt"],
            missing_component_behavior="skip",
            batch_size=test_batch_size,
            device_id=0,
            num_threads=1,
        ),
        base.file_reader_pipeline(
            equivalent_files, ["txt"], batch_size=test_batch_size, device_id=0, num_threads=1
        ),
        test_batch_size,
        math.ceil(num_samples / test_batch_size) * 10,
    )
    # The reported epoch size must equal the single stored sample.
    wds_pipeline = base.webdataset_raw_pipeline(
        tar_file_path,
        index_file.name,
        ["txt"],
        batch_size=test_batch_size,
        device_id=0,
        num_threads=1,
    )
    wds_pipeline.build()
    assert_equal(list(wds_pipeline.epoch_size().values())[0], num_samples)
def test_single_sample_and_junk():
    """A single-sample archive that also contains junk entries is read correctly."""
    test_batch_size = 1
    num_samples = 1
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/sample-tar/single_junk.tar")
    index_file = base.generate_temp_index_file(tar_file_path)
    extract_dir = base.generate_temp_extract(tar_file_path)
    equivalent_files = list(sorted(glob(extract_dir.name + "/*")))
    # Run several epochs (factor 10) to also cover the wraparound behavior.
    compare_pipelines(
        base.webdataset_raw_pipeline(
            tar_file_path,
            index_file.name,
            ["txt"],
            batch_size=test_batch_size,
            device_id=0,
            num_threads=1,
        ),
        base.file_reader_pipeline(
            equivalent_files, ["txt"], batch_size=test_batch_size, device_id=0, num_threads=1
        ),
        test_batch_size,
        math.ceil(num_samples / test_batch_size) * 10,
    )
    # The junk entries must not inflate the reported epoch size.
    wds_pipeline = base.webdataset_raw_pipeline(
        tar_file_path,
        index_file.name,
        ["txt"],
        batch_size=test_batch_size,
        device_id=0,
        num_threads=1,
    )
    wds_pipeline.build()
    assert_equal(list(wds_pipeline.epoch_size().values())[0], num_samples)
def test_wide_sample():
    """A single sample with very many (1000) components is read correctly."""
    test_batch_size = 1
    num_samples = 1
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/sample-tar/wide.tar")
    index_file = base.generate_temp_index_file(tar_file_path)
    extract_dir = base.generate_temp_extract(tar_file_path)
    equivalent_files = list(sorted(glob(extract_dir.name + "/*")))
    num_components = 1000
    compare_pipelines(
        base.webdataset_raw_pipeline(
            tar_file_path,
            index_file.name,
            [str(x) for x in range(num_components)],
            batch_size=test_batch_size,
            device_id=0,
            num_threads=1,
        ),
        base.file_reader_pipeline(
            equivalent_files,
            [str(x) for x in range(num_components)],
            batch_size=test_batch_size,
            device_id=0,
            num_threads=1,
        ),
        test_batch_size,
        math.ceil(num_samples / test_batch_size) * 10,
    )
    # NOTE(review): this epoch-size check requests only ["txt"] although the
    # archive's components are numbered — presumably the epoch size does not
    # depend on the requested extensions; confirm.
    wds_pipeline = base.webdataset_raw_pipeline(
        tar_file_path,
        index_file.name,
        ["txt"],
        batch_size=test_batch_size,
        device_id=0,
        num_threads=1,
    )
    wds_pipeline.build()
    assert_equal(list(wds_pipeline.epoch_size().values())[0], num_samples)
def test_argument_errors():
    """Invalid argument combinations must raise descriptive RuntimeErrors."""
    def paths_index_paths_error():
        # Three archives but only one index file: must be rejected.
        webdataset_pipeline = base.webdataset_raw_pipeline(
            [
                os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar"),
                os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-1.tar"),
                os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-2.tar"),
            ],
            ["test.idx"],
            ["jpg", "cls"],
            batch_size=1,
            device_id=0,
            num_threads=1,
        )
        webdataset_pipeline.build()
    assert_raises(
        RuntimeError,
        paths_index_paths_error,
        glob="The number of index files, if any, must match the number of archives in the dataset",
    )
    # Unknown missing_component_behavior value is rejected...
    assert_raises(
        RuntimeError,
        general_corner_case,
        missing_component_behavior="SomethingInvalid",
        glob="Invalid value for missing_component_behavior",
    )
    # ...but the check is case-insensitive: "Skip" is accepted.
    general_corner_case(missing_component_behavior="Skip")
    # Unsupported output dtype.
    assert_raises(
        RuntimeError,
        general_corner_case,
        dtypes=[dali.types.STRING, dali.types.STRING],
        glob="Unsupported output dtype *. Supported types are",
    )
    # A single dtype for two extensions: count mismatch.
    assert_raises(
        RuntimeError,
        general_corner_case,
        dtypes=dali.types.INT8,
        glob="Number of extensions does not match the number of provided types",
    )
def general_index_error(
    index_file_contents,
    tar_file_path="db/webdataset/MNIST/devel-0.tar",
    ext="jpg"
):
    """Build and run a webdataset pipeline against a hand-written index file.

    The raw bytes are written verbatim to a temporary index file; callers pass
    deliberately malformed contents and expect the pipeline to raise.
    """
    tmp_index = tempfile.NamedTemporaryFile()
    tmp_index.write(index_file_contents)
    tmp_index.flush()
    pipeline = base.webdataset_raw_pipeline(
        os.path.join(get_dali_extra_path(), tar_file_path),
        tmp_index.name,
        ext,
        batch_size=1,
        device_id=0,
        num_threads=1,
    )
    pipeline.build()
    # Run twice so that errors surfacing on epoch wraparound also trigger.
    for _ in range(2):
        pipeline.run()
def test_index_errors():
    """Each malformed index file variant must produce a specific error message."""
    assert_raises(RuntimeError, general_index_error, b"", glob="no version signature found")
    assert_raises(RuntimeError, general_index_error, b"v0.1",
                  glob="Unsupported version of the index file (v0.1).",
    )
    assert_raises(RuntimeError, general_index_error, b"v1.1", glob="no sample count found")
    assert_raises(
        RuntimeError, general_index_error, b"v1.1 -1", glob="sample count must be positive"
    )
    assert_raises(
        RuntimeError, general_index_error, b"v1.1 1\n", glob="no extensions provided for the sample"
    )
    assert_raises(
        RuntimeError,
        general_index_error,
        b"v1.1 1\njpg",
        glob="Could not find all necessary component parameters",
    )
    assert_raises(
        RuntimeError,
        general_index_error,
        b"v1.1 1\njpg 1 0",
        glob="tar offset is not a multiple of tar block size",
    )
    # Offset points past the end of a deliberately empty archive.
    assert_raises(
        RuntimeError,
        general_index_error,
        b"v1.1 1\njpg 51200 1",
        "db/webdataset/sample-tar/empty.tar",
        glob="offset is outside of the archive file",
    )
|
DALI-main
|
dali/test/python/reader/test_webdataset_corner.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import sys
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
from astropy.io import fits
import numpy as np
import tempfile
import random
from nose2.tools import params
from test_utils import to_array
from numpy.testing import assert_array_equal
def enum_product(*params):
return tuple((i,) + t for i, t in enumerate(itertools.product(*params)))
def create_fits_file(np_rng, filename, shape, type=np.int32, compressed=False, hdus=1):
    """Write a FITS file with `hdus` image extensions of random integers in [0, 100).

    Args:
        np_rng: numpy RandomState used to draw the pixel data.
        filename: destination path (overwritten if it already exists).
        shape: shape of each image HDU.
        type: numpy dtype of the stored data (name kept for backward
            compatibility even though it shadows the builtin).
        compressed: store HDUs as CompImageHDU instead of ImageHDU.
        hdus: number of image HDUs appended after the (empty) primary HDU.
    """
    hdu_list = [fits.PrimaryHDU(header=None)]
    for i in range(hdus):
        data = np_rng.randint(100, size=shape).astype(type)
        # Build only the HDU variant that is needed — the original constructed
        # an ImageHDU and then discarded it whenever compression was requested.
        if compressed:
            hdu = fits.CompImageHDU(data, name=f"IMAGE{i + 1}")
        else:
            hdu = fits.ImageHDU(data, name=f"IMAGE{i + 1}")
        hdu_list.append(hdu)
    fits.HDUList(hdu_list).writeto(filename, overwrite=True)
@pipeline_def
def FitsReaderPipeline(path, device="cpu", file_list=None, files=None, file_filter="*.fits",
                       hdu_indices=None, dtype=None):
    """DALI pipeline reading FITS files via fn.experimental.readers.fits.

    Exactly one of `file_list`, `files` or `file_filter` selects the inputs;
    `hdu_indices` picks which HDUs become pipeline outputs.
    """
    if hdu_indices is None:
        # Default kept from the original signature; was a mutable default
        # argument (`hdu_indices=[2]`), which is an anti-pattern.
        hdu_indices = [2]
    data = fn.experimental.readers.fits(device=device, file_list=file_list, files=files,
                                        file_root=path, file_filter=file_filter, shard_id=0,
                                        num_shards=1, hdu_indices=hdu_indices)
    # Multiple HDUs come back as a list of data nodes; normalize to a tuple.
    return tuple(data) if isinstance(data, list) else data
def get_dtypes(compression):
    """Return the numpy dtypes the test should cover.

    Kept as a list (not a set) to retain a deterministic declaration order.
    Dtypes unsupported by the available astropy version or by compressed
    images are filtered out.
    """
    # Renamed from `all`, which shadowed the builtin.
    all_types = [
        np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16,
        np.uint32, np.uint64, np.float32, np.float64,
    ]
    excluded = set()
    # The astropy is not actively developed for Python 3.6/3.7 and the last
    # available version does not support some dtypes.
    vi = sys.version_info
    if vi.major < 3 or (vi.major == 3 and vi.minor <= 7):
        excluded |= {np.int8}
    # Astropy doesn't support writing those types to compressed image.
    # Furthermore, due to issues with gpu acceleration of rice decoding of
    # floats, it is not supported yet.
    if compression:
        excluded |= {np.int64, np.uint64, np.float32, np.float64}
    return [dtype for dtype in all_types if dtype not in excluded]
# Test shapes, for each number of dims, astropy & fits do not handle dims = ()
# Maps ndim -> 8 sample shapes used as one dataset.
test_shapes = {
    1: [(10,), (12,), (10,), (20,), (10,), (12,), (13,), (19,)],
    2: [(10, 10), (12, 10), (10, 12), (20, 15), (10, 11), (12, 11), (13, 11), (19, 10)],
    3: [(6, 2, 5), (5, 6, 2), (3, 3, 3), (10, 1, 8), (8, 8, 3), (2, 2, 3), (8, 4, 3), (1, 10, 1)],
    4: [(2, 6, 2, 5), (5, 1, 6, 2), (3, 2, 3, 3), (1, 10, 1, 8), (2, 8, 2, 3), (2, 3, 2, 3),
        (1, 8, 4, 3), (1, 3, 10, 1)],
}
def _testimpl_types_and_shapes(np_rng, device, shapes, num_outputs, type, batch_size, num_threads,
                               compressed_arg, file_arg_type):
    """Compare the DALI FITS reader with astropy for the given dtype/shape set.

    `file_arg_type` selects how files are passed to the reader (file_list /
    files / file_filter); `compressed_arg=None` randomizes compression per file.
    """
    nsamples = len(shapes)
    # setup files
    with tempfile.TemporaryDirectory() as test_data_root:
        # setup file
        filenames = ["test_{:02d}.fits".format(i) for i in range(nsamples)]
        full_paths = [os.path.join(test_data_root, fname) for fname in filenames]
        for i in range(nsamples):
            compressed = compressed_arg
            if compressed is None:
                compressed = random.choice([False, True])
            create_fits_file(np_rng, full_paths[i], shapes[i], type, compressed, num_outputs)
        # load manually, we skip primary HDU since it only stores metadata
        # astropy returns data from each HDUs as a ndarray
        hduls = [fits.open(filename) for filename in full_paths]
        arrays = [hdu.data for hdul in hduls for hdu in hdul[1:]]
        hdu_indices = list(range(2, num_outputs + 2))
        # load with numpy reader
        file_list_arg = None
        files_arg = None
        file_filter_arg = None
        if file_arg_type == 'file_list':
            file_list_arg = os.path.join(test_data_root, "input.lst")
            with open(file_list_arg, "w") as f:
                f.writelines("\n".join(filenames))
        elif file_arg_type == 'files':
            files_arg = filenames
        elif file_arg_type == "file_filter":
            file_filter_arg = "*.fits"
        else:
            assert False
        pipe = FitsReaderPipeline(path=test_data_root, files=files_arg, file_list=file_list_arg,
                                  file_filter=file_filter_arg, device=device, batch_size=batch_size,
                                  num_threads=num_threads, device_id=0, hdu_indices=hdu_indices)
        try:
            pipe.build()
            i = 0
            # NOTE(review): `i` advances by num_outputs per sample but is
            # compared against nsamples (number of files), so for
            # num_outputs > 1 only a prefix of the files is verified —
            # confirm whether this is intentional.
            while i < nsamples:
                pipe_out = pipe.run()
                for s in range(batch_size):
                    if i >= nsamples:
                        break
                    for out_idx in range(num_outputs):
                        pipe_arr = to_array(pipe_out[out_idx][s])
                        ref_arr = arrays[i + out_idx]
                        assert_array_equal(pipe_arr, ref_arr)
                    i += num_outputs
        finally:
            del pipe
@params(*enum_product(get_dtypes(False), [1, 2, 3, 4], ["cpu", "gpu"]))
def test_reading_uncompressed(i, dtype, ndim, device):
    # `i` is the case index from enum_product; it seeds the RNGs so that the
    # randomized batch/thread/output counts are reproducible per case.
    rng = np.random.default_rng(42 + i)
    np_rng = np.random.RandomState(12345 + i)
    compressed = False
    shapes = test_shapes[ndim]
    file_arg_type = rng.choice(['file_list', 'files', 'file_filter'])
    num_threads = rng.choice([1, 2, 3, 4, 5, 6, 7, 8])
    batch_size = rng.choice([1, 3, 4, 8, 16])
    num_outputs = rng.choice([1, 3, 4, 8])
    _testimpl_types_and_shapes(np_rng, device, shapes, num_outputs, dtype, batch_size, num_threads,
                               compressed, file_arg_type)
@params(*enum_product(get_dtypes(True), [1, 2, 3], ["cpu", "gpu"]))
def test_reading_compressed(i, dtype, ndim, device):
    assert ndim <= 3  # astropy doesn't support compression of images with more dimensions
    # `i` is the case index from enum_product; it seeds the RNGs so that the
    # randomized batch/thread/output counts are reproducible per case.
    rng = np.random.default_rng(42 + i)
    np_rng = np.random.RandomState(12345 + i)
    compressed = True
    shapes = test_shapes[ndim]
    file_arg_type = rng.choice(['file_list', 'files', 'file_filter'])
    num_threads = rng.choice([1, 2, 3, 4, 5, 6, 7, 8])
    batch_size = rng.choice([1, 3, 4, 8, 16])
    num_outputs = rng.choice([1, 3, 4, 8])
    _testimpl_types_and_shapes(np_rng, device, shapes, num_outputs, dtype, batch_size, num_threads,
                               compressed, file_arg_type)
|
DALI-main
|
dali/test/python/reader/test_fits.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import Pipeline, pipeline_def
from nvidia.dali import fn
import nvidia.dali.types as types
import scipy.io.wavfile
import numpy as np
import json
import tempfile
import os
from test_audio_decoder_utils import generate_waveforms
from test_utils import compare_pipelines
def create_manifest_file(manifest_file, names, lengths, rates, texts):
    """Write a NeMo ASR manifest: one JSON object per line with the audio
    path, its duration in seconds (samples / rate) and the transcript."""
    assert len(names) == len(lengths) == len(rates) == len(texts)
    with open(manifest_file, 'w') as f:
        for name, length, rate, text in zip(names, lengths, rates, texts):
            entry = {
                'audio_filepath': name,
                'duration': length * (1.0 / rate),
                "text": text,
            }
            json.dump(entry, f)
            f.write('\n')
# Module-level fixtures: three synthetic WAV files (1, 2 and 4 channels) plus
# two NeMo ASR manifests (ASCII and non-ASCII transcripts) in a temp dir that
# lives for the whole test session.
tmp_dir = tempfile.TemporaryDirectory()
names = [
    os.path.join(tmp_dir.name, "dali_test_1C.wav"),
    os.path.join(tmp_dir.name, "dali_test_2C.wav"),
    os.path.join(tmp_dir.name, "dali_test_4C.wav")
]
# Per-file sine frequencies; the list length determines the channel count.
freqs = [
    np.array([0.02]),
    np.array([0.01, 0.012]),
    np.array([0.01, 0.012, 0.013, 0.014])
]
rates = [22050, 22050, 12347]  # sample rates, Hz
lengths = [10000, 54321, 12345]  # lengths in samples
def create_ref():
    # Generate the reference waveforms, scaled to int16 full range.
    ref = []
    for i in range(len(names)):
        wave = generate_waveforms(lengths[i], freqs[i])
        wave = (wave * 32767).round().astype(np.int16)
        ref.append(wave)
    return ref
ref_i = create_ref()
def create_wav_files():
    # Write the reference waveforms to disk as WAV.
    for i in range(len(names)):
        scipy.io.wavfile.write(names[i], rates[i], ref_i[i])
create_wav_files()
ref_text_literal = [
    "dali test 1C",
    "dali test 2C",
    "dali test 4C",
]
nemo_asr_manifest = os.path.join(tmp_dir.name, "nemo_asr_manifest.json")
create_manifest_file(nemo_asr_manifest, names, lengths, rates, ref_text_literal)
# Expected reader output: transcripts as uint8 arrays of their UTF-8 bytes.
ref_text = [np.frombuffer(bytes(s, "utf8"), dtype=np.uint8) for s in ref_text_literal]
ref_text_non_ascii_literal = [
    u"dzień dobry",
    u"доброе утро",
    u"这是一个测试",
]
nemo_asr_manifest_non_ascii = os.path.join(tmp_dir.name, "nemo_asr_manifest_non_ascii.json")
create_manifest_file(nemo_asr_manifest_non_ascii, names, lengths, rates, ref_text_non_ascii_literal)
ref_text_non_ascii = [np.frombuffer(bytes(s, "utf8"), dtype=np.uint8)
                      for s in ref_text_non_ascii_literal]
# Target rates used by the resampling outputs below.
rate1 = 16000
rate2 = 44100
class NemoAsrReaderPipeline(Pipeline):
    """Pipeline exposing the nemo_asr reader in many configurations at once:
    plain / downmixed / resampled audio in int16 and float, plus transcripts
    from the ASCII and non-ASCII manifests. All readers share one seed so
    their sample order matches."""
    def __init__(self, batch_size=8):
        super(NemoAsrReaderPipeline, self).__init__(
            batch_size=batch_size, num_threads=1, device_id=0,
            exec_async=True, exec_pipelined=True)
    def define_graph(self):
        fixed_seed = 12345
        audio_plain_i = fn.readers.nemo_asr(
            manifest_filepaths=[nemo_asr_manifest], dtype=types.INT16, downmix=False,
            read_sample_rate=False, read_text=False, seed=fixed_seed)
        audio_plain_f = fn.readers.nemo_asr(
            manifest_filepaths=[nemo_asr_manifest], dtype=types.FLOAT, downmix=False,
            read_sample_rate=False, read_text=False, seed=fixed_seed)
        audio_downmix_i = fn.readers.nemo_asr(
            manifest_filepaths=[nemo_asr_manifest], dtype=types.INT16, downmix=True,
            read_sample_rate=False, read_text=False, seed=fixed_seed)
        audio_downmix_f = fn.readers.nemo_asr(
            manifest_filepaths=[nemo_asr_manifest], dtype=types.FLOAT, downmix=True,
            read_sample_rate=False, read_text=False, seed=fixed_seed)
        # Resampled variants also request the sample rate (second output).
        audio_resampled1_i, _ = fn.readers.nemo_asr(
            manifest_filepaths=[nemo_asr_manifest], dtype=types.INT16, downmix=True,
            sample_rate=rate1, read_sample_rate=True, read_text=False, seed=fixed_seed)
        audio_resampled1_f, _ = fn.readers.nemo_asr(
            manifest_filepaths=[nemo_asr_manifest], dtype=types.FLOAT, downmix=True,
            sample_rate=rate1, read_sample_rate=True, read_text=False, seed=fixed_seed)
        audio_resampled2_i, _ = fn.readers.nemo_asr(
            manifest_filepaths=[nemo_asr_manifest], dtype=types.INT16, downmix=True,
            sample_rate=rate2, read_sample_rate=True, read_text=False, seed=fixed_seed)
        audio_resampled2_f, _ = fn.readers.nemo_asr(
            manifest_filepaths=[nemo_asr_manifest], dtype=types.FLOAT, downmix=True,
            sample_rate=rate2, read_sample_rate=True, read_text=False, seed=fixed_seed)
        # Transcript outputs (third output when read_text=True).
        _, _, text = fn.readers.nemo_asr(
            manifest_filepaths=[nemo_asr_manifest], dtype=types.INT16, downmix=True,
            read_sample_rate=True, read_text=True, seed=fixed_seed)
        _, _, text_non_ascii = fn.readers.nemo_asr(
            manifest_filepaths=[nemo_asr_manifest_non_ascii], dtype=types.INT16, downmix=True,
            read_sample_rate=True, read_text=True, seed=fixed_seed)
        return audio_plain_i, audio_plain_f, audio_downmix_i, audio_downmix_f, \
            audio_resampled1_i, audio_resampled1_f, audio_resampled2_i, audio_resampled2_f, \
            text, text_non_ascii
def test_decoded_vs_generated():
    """Check every NemoAsrReaderPipeline output against analytically generated
    references: plain, downmixed and resampled audio plus transcripts."""
    batch_size = 3
    pipeline = NemoAsrReaderPipeline(batch_size=batch_size)
    pipeline.build()
    # Renamed the loop variable from `iter`, which shadowed the builtin.
    for _ in range(1):
        out = pipeline.run()
        for idx in range(batch_size):
            audio_plain_i = out[0].at(idx)
            audio_plain_f = out[1].at(idx)
            audio_downmix_i = out[2].at(idx)
            audio_downmix_f = out[3].at(idx)
            audio_resampled1_i = out[4].at(idx)
            audio_resampled1_f = out[5].at(idx)
            audio_resampled2_i = out[6].at(idx)
            audio_resampled2_f = out[7].at(idx)
            text = out[8].at(idx)
            text_non_ascii = out[9].at(idx)
            ref_plain_i = ref_i[idx]
            np.testing.assert_allclose(audio_plain_i, ref_plain_i, rtol=1e-7)
            # Float output is the int16 data normalized to [-1, 1].
            ref_plain_f = ref_i[idx].astype(np.float32) / 32767
            np.testing.assert_allclose(audio_plain_f, ref_plain_f, rtol=1e-4)
            # Downmix reference: mean over channels, computed in float.
            ref_downmix_i_float = ref_i[idx].astype(np.float32).mean(axis=1, keepdims=1)
            ref_downmix_i = ref_downmix_i_float.astype(np.int16).flatten()
            np.testing.assert_allclose(audio_downmix_i, ref_downmix_i, atol=1)
            ref_downmix_f = (ref_downmix_i_float / 32767).flatten()
            np.testing.assert_allclose(audio_downmix_f, ref_downmix_f, rtol=1e-4)
            ref_resampled1_float = generate_waveforms(
                lengths[idx] * rate1 / rates[idx], freqs[idx] * (rates[idx] / rate1))
            ref_resampled1_downmix = ref_resampled1_float.astype(
                np.float32).mean(axis=1, keepdims=1)
            ref_resampled1_i = (ref_resampled1_downmix * 32767).astype(np.int16).flatten()
            # resampling - allow for 1e-3 dynamic range error
            np.testing.assert_allclose(
                audio_resampled1_i, ref_resampled1_i, atol=round(32767 * 1e-3))
            ref_resampled1_f = ref_resampled1_downmix.flatten()
            # resampling - allow for 1e-3 dynamic range error
            np.testing.assert_allclose(audio_resampled1_f, ref_resampled1_f, atol=1e-3)
            ref_resampled2_float = generate_waveforms(
                lengths[idx] * rate2 / rates[idx], freqs[idx] * (rates[idx] / rate2))
            ref_resampled2_downmix = ref_resampled2_float.astype(
                np.float32).mean(axis=1, keepdims=1)
            ref_resampled2_i = (ref_resampled2_downmix * 32767).astype(np.int16).flatten()
            # resampling - allow for 1e-3 dynamic range error
            np.testing.assert_allclose(
                audio_resampled2_i, ref_resampled2_i, atol=round(32767 * 1e-3))
            ref_resampled2_f = ref_resampled2_downmix.flatten()
            # resampling - allow for 1e-3 dynamic range error
            np.testing.assert_allclose(audio_resampled2_f, ref_resampled2_f, atol=1e-3)
            np.testing.assert_equal(text, ref_text[idx])
            np.testing.assert_equal(text_non_ascii, ref_text_non_ascii[idx])
            text_non_ascii_str = str(text_non_ascii.tobytes(), encoding='utf8')
            # Checking that we don't have any trailing zeros
            # (those won't be caught by the string comparison)
            ref_text_non_ascii_literal_bytes = bytes(ref_text_non_ascii_literal[idx], 'utf8')
            assert text_non_ascii.tobytes() == ref_text_non_ascii_literal_bytes, \
                f"'{text_non_ascii.tobytes()}' != '{ref_text_non_ascii_literal_bytes}'"
            # String comparison (utf-8)
            assert text_non_ascii_str == ref_text_non_ascii_literal[idx], \
                f"'{text_non_ascii_str}' != '{ref_text_non_ascii_literal[idx]}'"
batch_size_alias_test = 64
@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def nemo_pipe(nemo_op, path, read_text, read_sample_rate, dtype, downmix):
    """Instantiate `nemo_op` (fn.readers.nemo_asr or its legacy alias) and
    return its outputs: (audio, rate), (audio, text), or audio alone,
    depending on the read_* flags."""
    outputs = nemo_op(manifest_filepaths=path, read_sample_rate=read_sample_rate,
                      read_text=read_text, dtype=dtype, downmix=downmix)
    if read_sample_rate:
        audio, sr = outputs
        return audio, sr
    if read_text:
        audio, text = outputs
        return audio, text
    return outputs
def test_nemo_asr_reader_alias():
    # fn.nemo_asr_reader is a legacy alias of fn.readers.nemo_asr; both must
    # produce identical outputs across all flag/dtype/downmix combinations.
    for read_sr, read_text in [(True, False), (False, True), (False, False)]:
        for dtype in [types.INT16, types.FLOAT]:
            for downmix in [True, False]:
                new_pipe = nemo_pipe(
                    fn.readers.nemo_asr,
                    [nemo_asr_manifest], read_sr, read_text, dtype, downmix)
                legacy_pipe = nemo_pipe(
                    fn.nemo_asr_reader,
                    [nemo_asr_manifest], read_sr, read_text, dtype, downmix)
                compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 50)
def test_nemo_asr_reader_pad_last_batch():
    """With pad_last_batch=True, padding samples must replicate the last real
    sample of the epoch."""
    @pipeline_def(device_id=0, num_threads=4)
    def nemo_asr_pad_last_batch_pipe():
        audio = fn.readers.nemo_asr(manifest_filepaths=[nemo_asr_manifest], pad_last_batch=True,
                                    read_sample_rate=False, read_text=False)
        return audio
    def _testimpl_nemo_asr_reader_pad_last_batch(batch_size):
        pipe = nemo_asr_pad_last_batch_pipe(batch_size=batch_size)
        pipe.build()
        dataset_len = len(names)
        assert dataset_len % batch_size > 0  # Checking that we need to pad
        sample_idx = 0
        for it in range(10):
            audio = pipe.run()[0]
            # Position of the first sample of this batch within the epoch;
            # wrap back to 0 once a padded batch has been consumed.
            sample_idx = it * batch_size
            if sample_idx > dataset_len:
                sample_idx = 0
            last_sample = None
            padded_sample = None
            for i in range(batch_size):
                if sample_idx == dataset_len - 1:
                    last_sample = np.array(audio[i])
                elif sample_idx >= dataset_len:
                    # Every padding sample must equal the last real one.
                    padded_sample = np.array(audio[i])
                    np.testing.assert_array_equal(padded_sample, last_sample)
                sample_idx += 1
    # The manifest has 3 samples, of lengths 10000, 54321, 12345
    # With batch size 2, batches will contain lengths:
    # [10000, 54321], [12345, 12345], [10000, 54321], ...
    # This is meant to reproduce an error found when combining pad_last_sample=True,
    # using ShareData to replicate the last sample, and trying to resize to a bigger
    # buffer after ShareData.
    yield _testimpl_nemo_asr_reader_pad_last_batch, 2
    # Trying to catch race conditions (A lot of samples in the batch to be replicated)
    yield _testimpl_nemo_asr_reader_pad_last_batch, 128
def test_read_idxs():
    """Two identically-seeded shuffling readers must return the same audio and
    the same (valid) sample indices when read_idxs=True."""
    batch_size = 10
    reader_seed = 12345
    @pipeline_def(device_id=0, num_threads=4)
    def nemo_asr_reader_read_idxs(reader_seed=reader_seed):
        audio, idx = fn.readers.nemo_asr(
            manifest_filepaths=[nemo_asr_manifest], random_shuffle=True, seed=reader_seed,
            read_sample_rate=False, read_text=False, read_idxs=True)
        return audio, idx
    seed = 12345
    pipe1 = nemo_asr_reader_read_idxs(batch_size=batch_size, reader_seed=seed)
    pipe1.build()
    pipe2 = nemo_asr_reader_read_idxs(batch_size=batch_size, reader_seed=seed)
    pipe2.build()
    total_samples = len(names)
    # Renamed the loop variable from `iter`, which shadowed the builtin.
    for _ in range(3):
        audio1, idx1 = pipe1.run()
        audio2, idx2 = pipe2.run()
        for s in range(batch_size):
            np.testing.assert_array_equal(np.array(audio1[s]), np.array(audio2[s]))
            np.testing.assert_array_equal(np.array(idx1[s]), np.array(idx2[s]))
            idx = np.array(idx1[s])[0]
            assert 0 <= idx < total_samples
|
DALI-main
|
dali/test/python/reader/test_nemo_asr.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.fn as fn
import os
import tempfile
import json
from nvidia.dali import Pipeline, pipeline_def
from nose_utils import raises
from nose2.tools import params
from test_utils import compare_pipelines, get_dali_extra_path
# Location of the small COCO test set shipped with DALI_extra.
test_data_root = get_dali_extra_path()
file_root = os.path.join(test_data_root, 'db', 'coco', 'images')
train_annotations = os.path.join(test_data_root, 'db', 'coco', 'instances.json')
class sample_desc():
    """Record of a sample's expected image id, raw class label and the class
    label after DALI's default remapping."""
    def __init__(self, id, cls, mapped_cls):
        self.id, self.cls, self.mapped_cls = id, cls, mapped_cls
# filename -> sample_desc(image id, raw class, remapped class) for the test set.
test_data = {
    'car-race-438467_1280.jpg': sample_desc(17, 5, 6),
    'clock-1274699_1280.jpg': sample_desc(6, 7, 8),
    'kite-1159538_1280.jpg': sample_desc(21, 12, 13),
    'cow-234835_1280.jpg': sample_desc(59, 8, 9),
    'home-office-336378_1280.jpg': sample_desc(39, 13, 14),
    'suit-2619784_1280.jpg': sample_desc(0, 16, 17),
    'business-suit-690048_1280.jpg': sample_desc(5, 16, 17),
    'car-604019_1280.jpg': sample_desc(41, 5, 6)
}
# Convenience views in declaration order.
images = list(test_data.keys())
expected_ids = list(s.id for s in test_data.values())
def check_operator_coco_reader_custom_order(order=None, add_invalid_paths=False):
    """Check that fn.readers.coco returns samples in the user-provided order.

    Args:
        order: indices (possibly repeated) into the test set; None = natural order.
        add_invalid_paths: append a nonexistent path to `images` to check that
            it is ignored by the reader.
    """
    batch_size = 2
    if not order:
        order = range(len(test_data))
    keys = list(test_data.keys())
    values = list(s.id for s in test_data.values())
    images = [keys[i] for i in order]
    images_arg = images.copy()
    if add_invalid_paths:
        images_arg += ["/invalid/path/image.png"]
    expected_ids = [values[i] for i in order]
    with tempfile.TemporaryDirectory() as annotations_dir:
        pipeline = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
        with pipeline:
            _, _, _, ids = fn.readers.coco(
                file_root=file_root,
                annotations_file=train_annotations,
                image_ids=True,
                images=images_arg,
                save_preprocessed_annotations=True,
                save_preprocessed_annotations_dir=annotations_dir)
        pipeline.set_outputs(ids)
        pipeline.build()
        i = 0
        assert len(images) % batch_size == 0
        while i < len(images):
            out = pipeline.run()
            for s in range(batch_size):
                # Samples must come out exactly in the requested order.
                assert out[0].at(s) == expected_ids[i], f"{i}, {expected_ids}"
                i = i + 1
        filenames_file = os.path.join(annotations_dir, 'filenames.dat')
        with open(filenames_file) as f:
            lines = f.read().splitlines()
        # BUG FIX: the original asserted `lines.sort() == images.sort()`, but
        # list.sort() returns None, so the check always passed. Compare actual
        # contents instead; sets are used because `order` may repeat entries.
        assert set(lines) == set(images), f"{sorted(set(lines))} vs {sorted(set(images))}"
def test_operator_coco_reader_custom_order():
    """Yield custom-ordering cases for the COCO reader's `images` argument."""
    described_orders = (
        (None, "natural order"),
        ([0, 2, 4, 6, 1, 3, 5, 7], "altered order"),
        ([0, 1, 2, 3, 2, 1, 4, 1, 5, 2, 6, 7], "with repetitions"),
    )
    for order, _description in described_orders:
        yield check_operator_coco_reader_custom_order, order, False
    # Natural order plus an invalid path appended to `images`.
    yield check_operator_coco_reader_custom_order, None, True
@params(True, False)
def test_operator_coco_reader_label_remap(avoid_remap):
    """Check that labels follow avoid_class_remapping: raw classes when True,
    remapped classes when False."""
    batch_size = 2
    images = list(test_data.keys())
    # Expected label per image id, depending on the remapping mode.
    ids_map = {s.id: s.cls if avoid_remap else s.mapped_cls for s in test_data.values()}
    pipeline = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    with pipeline:
        _, _, labels, ids = fn.readers.coco(
            file_root=file_root,
            annotations_file=train_annotations,
            image_ids=True,
            images=images,
            avoid_class_remapping=avoid_remap)
    pipeline.set_outputs(ids, labels)
    pipeline.build()
    i = 0
    assert len(images) % batch_size == 0
    while i < len(images):
        out = pipeline.run()
        for s in range(batch_size):
            print(out[0].at(s), out[1].at(s))
            assert ids_map[int(out[0].at(s))] == int(out[1].at(s)), \
                f"{i}, {ids_map[int(out[0].at(s))]} vs {out[1].at(s)}"
            i = i + 1
def test_operator_coco_reader_same_images():
    """Run three COCO readers (plain, polygon masks, pixelwise masks) over the
    same data and check they yield identical images, boxes and labels."""
    file_root = os.path.join(test_data_root, 'db', 'coco_pixelwise', 'images')
    train_annotations = os.path.join(test_data_root, 'db', 'coco_pixelwise', 'instances.json')
    coco_dir = os.path.join(test_data_root, 'db', 'coco')
    coco_dir_imgs = os.path.join(coco_dir, 'images')
    coco_pixelwise_dir = os.path.join(test_data_root, 'db', 'coco_pixelwise')
    coco_pixelwise_dir_imgs = os.path.join(coco_pixelwise_dir, 'images')
    # NOTE(review): the annotation path in each tuple below is discarded ("_")
    # and `train_annotations` assigned above is used for every iteration —
    # confirm whether the per-dataset annotation files were meant to be used.
    for file_root, _ in [
            (coco_dir_imgs, os.path.join(coco_dir, 'instances.json')),
            (coco_pixelwise_dir_imgs, os.path.join(coco_pixelwise_dir, 'instances.json')),
            (coco_pixelwise_dir_imgs, os.path.join(coco_pixelwise_dir, 'instances_rle_counts.json'))
    ]:
        pipe = Pipeline(batch_size=1, num_threads=4, device_id=0)
        with pipe:
            # Three readers over the same data; fixed seed on the first one.
            inputs1, boxes1, labels1, *_ = fn.readers.coco(
                file_root=file_root,
                annotations_file=train_annotations,
                name="reader1",
                seed=1234
            )
            inputs2, boxes2, labels2, *_ = fn.readers.coco(
                file_root=file_root,
                annotations_file=train_annotations,
                polygon_masks=True,
                name="reader2"
            )
            inputs3, boxes3, labels3, *_ = fn.readers.coco(
                file_root=file_root,
                annotations_file=train_annotations,
                pixelwise_masks=True,
                name="reader3"
            )
            pipe.set_outputs(
                inputs1, boxes1, labels1,
                inputs2, boxes2, labels2,
                inputs3, boxes3, labels3
            )
        pipe.build()
        # All three readers must agree on the dataset size...
        epoch_sz = pipe.epoch_size("reader1")
        assert epoch_sz == pipe.epoch_size("reader2")
        assert epoch_sz == pipe.epoch_size("reader3")
        # ...and on every sample's image, labels and boxes.
        for _ in range(epoch_sz):
            inputs1, boxes1, labels1, inputs2, boxes2, labels2, inputs3, boxes3, labels3 = \
                pipe.run()
            np.testing.assert_array_equal(inputs1.at(0), inputs2.at(0))
            np.testing.assert_array_equal(inputs1.at(0), inputs3.at(0))
            np.testing.assert_array_equal(labels1.at(0), labels2.at(0))
            np.testing.assert_array_equal(labels1.at(0), labels3.at(0))
            np.testing.assert_array_equal(boxes1.at(0), boxes2.at(0))
            np.testing.assert_array_equal(boxes1.at(0), boxes3.at(0))
@raises(RuntimeError,
        glob='Argument "preprocessed_annotations_dir" is not supported by operator *readers*COCO')
def test_invalid_args():
    """Passing the unsupported "preprocessed_annotations_dir" argument must raise."""
    pipeline = Pipeline(batch_size=2, num_threads=4, device_id=0)
    with pipeline:
        outputs = fn.readers.coco(
            file_root=file_root,
            annotations_file=train_annotations,
            image_ids=True,
            images=images,
            preprocessed_annotations_dir='/tmp')
        pipeline.set_outputs(outputs[3])
    pipeline.build()
# Batch size shared by the alias-comparison pipelines below.
batch_size_alias_test = 64


@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def coco_pipe(coco_op, file_root, annotations_file, polygon_masks, pixelwise_masks):
    """Pipeline built around `coco_op` (fn.readers.coco or its legacy alias)."""
    images, bboxes, classes, *_ = coco_op(file_root=file_root,
                                          annotations_file=annotations_file,
                                          polygon_masks=polygon_masks,
                                          pixelwise_masks=pixelwise_masks)
    return images, bboxes, classes
def test_coco_reader_alias():
    """fn.readers.coco must produce the same data as the legacy fn.coco_reader."""
    file_root = os.path.join(test_data_root, 'db', 'coco_pixelwise', 'images')
    train_annotations = os.path.join(test_data_root, 'db', 'coco_pixelwise', 'instances.json')

    def check_coco_reader_alias(polygon_masks, pixelwise_masks):
        new_pipe = coco_pipe(fn.readers.coco, file_root, train_annotations,
                             polygon_masks, pixelwise_masks)
        legacy_pipe = coco_pipe(fn.coco_reader, file_root, train_annotations,
                                polygon_masks, pixelwise_masks)
        compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 5)

    for polygon_masks, pixelwise_masks in ((None, None), (True, None), (None, True)):
        yield check_coco_reader_alias, polygon_masks, pixelwise_masks
@params(True, False)
def test_coco_include_crowd(include_iscrowd):
    """Verify that `include_iscrowd` controls whether crowd boxes are returned."""
    @pipeline_def(batch_size=1, device_id=0, num_threads=4)
    def coco_pipe(include_iscrowd):
        _, boxes, _, image_ids = fn.readers.coco(file_root=file_root,
                                                 annotations_file=train_annotations,
                                                 image_ids=True,
                                                 include_iscrowd=include_iscrowd)
        return boxes, image_ids

    with open(train_annotations) as file:
        annotations = json.load(file)
    pipe = coco_pipe(include_iscrowd=include_iscrowd)
    pipe.build()
    # There is only one reader in the pipeline, so take its epoch size.
    number_of_samples = next(iter(pipe.epoch_size().values()))
    # Group ground-truth bboxes and iscrowd flags per image id.
    anno_mapping = {}
    for elm in annotations["annotations"]:
        entry = anno_mapping.setdefault(elm["image_id"], {"bbox": [], "iscrowd": []})
        entry["bbox"].append(elm["bbox"])
        entry["iscrowd"].append(elm["iscrowd"])
    all_iscrowd = []
    for _ in range(number_of_samples):
        boxes, image_ids = pipe.run()
        img_id = int(image_ids.as_array())
        sample_boxes = boxes.as_array()[0]
        anno = anno_mapping[img_id]
        all_iscrowd += anno["iscrowd"]
        # it assumes that the coco reader reads annotations at the order of
        # appearance inside JSON
        idx = 0
        for j, iscrowd in enumerate(anno["iscrowd"]):
            if include_iscrowd or iscrowd == 0:
                assert np.all(sample_boxes[idx] == np.array(anno["bbox"][j]))
                idx += 1
    assert any(all_iscrowd), 'At least one annotation should include `iscrowd=1`'
def test_coco_empty_annotations_pix():
    """Images without segmentation annotations must yield all-zero pixelwise masks."""
    file_root = os.path.join(test_data_root, 'db', 'coco_dummy', 'images')
    train_annotations = os.path.join(test_data_root, 'db', 'coco_dummy', 'instances.json')

    @pipeline_def(batch_size=1, device_id=0, num_threads=4)
    def coco_pipe():
        _, _, _, masks, ids = fn.readers.coco(file_root=file_root,
                                              annotations_file=train_annotations,
                                              image_ids=True,
                                              pixelwise_masks=True)
        return masks, ids

    pipe = coco_pipe()
    pipe.build()
    # There is only one reader in the pipeline, so take its epoch size.
    number_of_samples = next(iter(pipe.epoch_size().values()))
    with open(train_annotations) as file:
        annotations = json.load(file)
    # True for image ids that carry at least one segmentation annotation.
    has_segmentation = {}
    for elm in annotations["annotations"]:
        img_id = elm["image_id"]
        has_segmentation[img_id] = has_segmentation.get(img_id, False) or "segmentation" in elm
    for _ in range(number_of_samples):
        mask, image_ids = pipe.run()
        img_id = int(image_ids.as_array())
        max_mask = np.max(np.array(mask.as_tensor()))
        segmented = img_id in has_segmentation and has_segmentation[img_id]
        # Non-empty mask iff the image has a segmentation annotation.
        assert (max_mask != 0 and segmented) or (max_mask == 0 and not segmented)
def test_coco_empty_annotations_poly():
    """Images without segmentation annotations must yield empty polygon outputs."""
    file_root = os.path.join(test_data_root, 'db', 'coco_dummy', 'images')
    train_annotations = os.path.join(test_data_root, 'db', 'coco_dummy', 'instances.json')

    @pipeline_def(batch_size=1, device_id=0, num_threads=4)
    def coco_pipe():
        _, _, _, poly, vert, ids = fn.readers.coco(file_root=file_root,
                                                   annotations_file=train_annotations,
                                                   image_ids=True,
                                                   polygon_masks=True)
        return poly, vert, ids

    pipe = coco_pipe()
    pipe.build()
    # There is only one reader in the pipeline, so take its epoch size.
    number_of_samples = next(iter(pipe.epoch_size().values()))
    with open(train_annotations) as file:
        annotations = json.load(file)
    # True for image ids that carry at least one segmentation annotation.
    has_segmentation = {}
    for elm in annotations["annotations"]:
        img_id = elm["image_id"]
        has_segmentation[img_id] = has_segmentation.get(img_id, False) or "segmentation" in elm
    for _ in range(number_of_samples):
        poly, vert, image_ids = pipe.run()
        img_id = int(image_ids.as_array())
        poly_size = np.array(poly.as_tensor()).size
        vert_size = np.array(vert.as_tensor()).size
        segmented = img_id in has_segmentation and has_segmentation[img_id]
        assert (poly_size != 0 and segmented) or (vert_size == 0 and not segmented)
|
DALI-main
|
dali/test/python/reader/test_coco.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import pipeline_def, fn
import os
import glob
from test_utils import get_dali_extra_path
test_data_root = os.path.join(get_dali_extra_path(), "db", "webdataset", "legacy_index_formats")
@pipeline_def(batch_size=8, num_threads=4, device_id=0)
def wds_index_file_pipeline(idx_path, device):
    """Read (jpg, cls) pairs from the test tar using the given index file."""
    jpg, cls = fn.readers.webdataset(paths=[os.path.join(test_data_root, "data.tar")],
                                     index_paths=[idx_path], ext=['jpg', 'cls'])
    if device != 'gpu':
        return jpg, cls
    return jpg.gpu(), cls.gpu()
def _test_wds_index_file_pipeline(idx_path, device):
    """Build the index-file pipeline and run a single iteration."""
    pipeline = wds_index_file_pipeline(idx_path, device)
    pipeline.build()
    pipeline.run()
def test_wds_index_file_pipeline():
    """Exercise every legacy index-file format on both CPU and GPU."""
    for idx_path in glob.glob(test_data_root + "/*.idx"):
        for device in ('cpu', 'gpu'):
            yield _test_wds_index_file_pipeline, idx_path, device
|
DALI-main
|
dali/test/python/reader/test_webdataset_legacy_index_files.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nose_utils # noqa:F401
import math
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import numpy as np
import os
from test_utils import get_dali_extra_path
class COCOReaderPipeline(Pipeline):
    """Pipeline that outputs only the image ids read by a COCO reader.

    The shuffling/sharding tests below use the ids to observe exactly which
    samples each shard produces in each epoch.
    """

    def __init__(self,
                 data_paths,
                 batch_size,
                 num_threads,
                 shard_id,
                 num_gpus,
                 random_shuffle,
                 stick_to_shard,
                 shuffle_after_epoch,
                 pad_last_batch,
                 initial_fill=1024):
        """Create the reader pipeline.

        Args:
            data_paths: (file_root, annotations_file) pair.
            shard_id / num_gpus: shard selection for this reader instance.
            random_shuffle / stick_to_shard / shuffle_after_epoch /
            pad_last_batch / initial_fill: forwarded to ops.readers.COCO.
        """
        # use only 1 GPU, as we care only about shard_id
        super().__init__(batch_size, num_threads, 0, prefetch_queue_depth=1)
        self.input = ops.readers.COCO(file_root=data_paths[0],
                                      annotations_file=data_paths[1],
                                      shard_id=shard_id,
                                      num_shards=num_gpus,
                                      random_shuffle=random_shuffle,
                                      image_ids=True,
                                      stick_to_shard=stick_to_shard,
                                      shuffle_after_epoch=shuffle_after_epoch,
                                      pad_last_batch=pad_last_batch,
                                      initial_fill=initial_fill)

    def define_graph(self):
        # Only the image ids are needed by the tests; drop images/boxes/labels.
        _, __, ___, ids = self.input(name="Reader")
        return ids
test_data_root = get_dali_extra_path()
coco_folder = os.path.join(test_data_root, 'db', 'coco')
# Datasets as [image dir, annotation file] pairs consumed by the tests below.
datasets = [[os.path.join(coco_folder, 'images'), os.path.join(coco_folder, 'instances.json')]]
def test_shuffling_patterns():
    """Yield shuffling/sharding configurations for check_shuffling_patterns."""
    for dataset in datasets:
        # Collect the reference set of image ids with a single unshuffled reader.
        ref_pipe = COCOReaderPipeline(batch_size=1,
                                      num_threads=4,
                                      shard_id=0,
                                      num_gpus=1,
                                      data_paths=dataset,
                                      random_shuffle=False,
                                      stick_to_shard=False,
                                      shuffle_after_epoch=False,
                                      pad_last_batch=False)
        ref_pipe.build()
        collected = []
        for _ in range(ref_pipe.epoch_size("Reader")):
            ref_pipe.schedule_run()
            collected.append(np.concatenate(ref_pipe.outputs()[0].as_array()))
        ref_img_ids = set(np.concatenate(collected))
        for num_gpus in (1, 2, 3, 4):
            for batch_size in (1, 10, 100):
                for stick_to_shard in (True, False):
                    for shuffle_after_epoch in (True, False):
                        for dry_run_num in (0, 1, 2):
                            yield (check_shuffling_patterns,
                                   dataset,
                                   num_gpus,
                                   batch_size,
                                   stick_to_shard,
                                   shuffle_after_epoch,
                                   dry_run_num,
                                   len(ref_img_ids))
def check_shuffling_patterns(dataset,
                             num_gpus,
                             batch_size,
                             stick_to_shard,
                             shuffle_after_epoch,
                             dry_run_num,
                             len_ref_img_ids):
    """Check that after `dry_run_num` warm-up epochs the shards together still
    cover the whole dataset (the set of collected ids has the reference size).

    Args:
        dataset: (file_root, annotations_file) pair.
        num_gpus: number of shards/pipelines to create.
        batch_size: batch size of every pipeline.
        stick_to_shard: whether readers stay on their initial shard.
        shuffle_after_epoch: whether data is reshuffled globally every epoch.
        dry_run_num: number of epochs to run and discard before collecting.
        len_ref_img_ids: expected number of distinct image ids in the dataset.
    """
    # Unsupported combination — nothing to check. Bail out BEFORE building
    # any pipelines (the original built all of them first and threw them away).
    if stick_to_shard and shuffle_after_epoch:
        return
    random_shuffle = not shuffle_after_epoch
    pad_last_batch = batch_size != 1

    def shard_iters(dataset_size, shard, epoch):
        # Iterations needed to consume shard `shard` during epoch `epoch`.
        # Unless readers advance through shards epoch by epoch, the shard
        # assignment is static and the epoch offset is irrelevant.
        mod = 0 if (stick_to_shard or shuffle_after_epoch) else epoch
        if pad_last_batch:
            shard_size = dataset_size // num_gpus
        else:
            shard_size = (dataset_size * (shard + 1 + mod) // num_gpus
                          - dataset_size * (shard + mod) // num_gpus)
        # Ceiling division: a partial batch still takes a full iteration.
        return -(-shard_size // batch_size)

    pipes = [
        COCOReaderPipeline(batch_size=batch_size,
                           num_threads=4,
                           shard_id=gpu,
                           num_gpus=num_gpus,
                           data_paths=dataset,
                           random_shuffle=random_shuffle,
                           stick_to_shard=stick_to_shard,
                           shuffle_after_epoch=shuffle_after_epoch,
                           pad_last_batch=pad_last_batch,
                           initial_fill=1)
        for gpu in range(num_gpus)
    ]
    for pipe in pipes:
        pipe.build()
    dataset_size = pipes[0].epoch_size("Reader")
    # Warm-up epochs whose outputs are discarded.
    for epoch in range(dry_run_num):
        for n in range(num_gpus):
            for _ in range(shard_iters(dataset_size, n, epoch)):
                pipes[n].run()
    # Collect one full epoch from every shard and check global coverage.
    new_img_ids = []
    for n in range(num_gpus):
        for _ in range(shard_iters(dataset_size, n, dry_run_num)):
            new_img_ids.append(np.concatenate(pipes[n].run()[0].as_array()))
    assert len(set(np.concatenate(new_img_ids))) == len_ref_img_ids
def gather_ids(pipes, epochs_run=0, batch_size=1, num_gpus_arg=None, gpus_arg=None):
    """Run one epoch on every pipeline and collect the image ids it produced.

    Each GPU iterates from `shard_id * data_size / num_gpus` samples up to
    `(shard_id + 1) * data_size / num_gpus`. After each epoch readers advance
    to the next shard; `epochs_run` accounts for that. With shuffle_after_epoch
    or stick_to_shard it doesn't matter and could/should be 0; it is relevant
    only when pad_last_batch is False, otherwise each shard has the same size
    due to padding.

    Returns:
        (per-pipe id arrays, per-pipe id sets, epochs_run + 1); for a single
        pipeline both lists are unwrapped to their only element.
    """
    dataset_size = pipes[0].epoch_size("Reader")
    num_gpus = num_gpus_arg if num_gpus_arg else len(pipes)
    shard_ids = gpus_arg if gpus_arg else range(num_gpus)
    collected = [[] for _ in pipes]
    for ids_accum, pipe, shard in zip(collected, pipes, shard_ids):
        begin = dataset_size * (shard + epochs_run) // num_gpus
        end = dataset_size * (shard + 1 + epochs_run) // num_gpus
        for _ in range(int(math.ceil((end - begin) / batch_size))):
            ids_accum.append(np.concatenate(pipe.run()[0].as_array()))
    id_sets = [set(np.concatenate(ids)) for ids in collected]
    if len(pipes) == 1:
        return collected[0], id_sets[0], epochs_run + 1
    return collected, id_sets, epochs_run + 1
def test_global_shuffle_random_shuffle():
    """shuffle_after_epoch must reshuffle data between epochs while the union
    of both shards' ids stays constant."""
    num_gpus = 2
    pipes = []
    for gpu in range(num_gpus):
        pipe = COCOReaderPipeline(batch_size=1,
                                  num_threads=4,
                                  shard_id=gpu,
                                  num_gpus=num_gpus,
                                  data_paths=datasets[0],
                                  random_shuffle=False,
                                  stick_to_shard=False,
                                  shuffle_after_epoch=True,
                                  pad_last_batch=False)
        pipe.build()
        pipes.append(pipe)
    _, first_epoch, _ = gather_ids(pipes)
    _, second_epoch, _ = gather_ids(pipes)
    # Per-shard content changes between epochs...
    assert first_epoch[0] != second_epoch[0]
    assert first_epoch[1] != second_epoch[1]
    # ...but both epochs still cover the same overall set of samples.
    assert first_epoch[0].union(first_epoch[1]) == \
        second_epoch[0].union(second_epoch[1])
def test_global_shuffle_random_shuffle_2():
    """A single reader sharded 1-of-2 with shuffle_after_epoch must produce
    distinct samples within an epoch and overlap between consecutive epochs."""
    pipes = [COCOReaderPipeline(batch_size=1,
                                num_threads=4,
                                shard_id=0,
                                num_gpus=2,
                                data_paths=datasets[0],
                                random_shuffle=False,
                                stick_to_shard=False,
                                shuffle_after_epoch=True,
                                pad_last_batch=False,
                                initial_fill=1)]
    pipes[0].build()
    first_ids, first_set, _ = gather_ids(pipes, num_gpus_arg=2, gpus_arg=[0])
    # No duplicates within the epoch.
    assert len(first_ids) == len(first_set)
    second_ids, second_set, _ = gather_ids(pipes, num_gpus_arg=2, gpus_arg=[0])
    assert len(second_ids) == len(second_set)
    # Consecutive epochs of the same shard share at least one sample.
    assert len(first_set.intersection(second_set)) != 0
def test_global_shuffle_dont_mix_epochs():
    # with `random_shuffle=False` `shuffle_after_epoch=True` should
    # still make data random between epochs
    num_gpus = 2
    pipes = [COCOReaderPipeline(batch_size=1,
                                num_threads=4,
                                shard_id=shard,
                                num_gpus=num_gpus,
                                data_paths=datasets[0],
                                random_shuffle=False,
                                stick_to_shard=False,
                                shuffle_after_epoch=True,
                                pad_last_batch=False)
             for shard in range(num_gpus)]
    for pipe in pipes:
        pipe.build()
    _, epoch_a, _ = gather_ids(pipes)
    _, epoch_b, _ = gather_ids(pipes)
    # Each shard's content changes between epochs...
    assert epoch_a[0] != epoch_b[0]
    assert epoch_a[1] != epoch_b[1]
    # ...while both epochs together cover the same samples.
    assert epoch_a[0].union(epoch_a[1]) == epoch_b[0].union(epoch_b[1])
def test_dont_mix_epochs():
    # with `random_shuffle=False` `shuffle_after_epoch=False` GPU0 data
    # from epoch 0 should equal to data from GPU1 from epoch 1
    num_gpus = 2
    pipes = []
    for gpu in range(num_gpus):
        pipes.append(COCOReaderPipeline(batch_size=1,
                                        num_threads=4,
                                        shard_id=gpu,
                                        num_gpus=num_gpus,
                                        data_paths=datasets[0],
                                        random_shuffle=False,
                                        stick_to_shard=False,
                                        shuffle_after_epoch=False,
                                        pad_last_batch=False))
    for pipe in pipes:
        pipe.build()
    _, epoch0_sets, epochs_run = gather_ids(pipes)
    _, epoch1_sets, _ = gather_ids(pipes, epochs_run)
    # Shards rotate between epochs: the readers swap their data.
    assert epoch0_sets[0] == epoch1_sets[1]
    assert epoch0_sets[1] == epoch1_sets[0]
def create_pipeline(creator, batch_size, num_gpus):
    """Build `num_gpus` pipelines via `creator` and return them together with
    the per-shard iteration count.

    Returns:
        (pipes, iters) where iters = epoch_size("Reader") // num_gpus.
    """
    iters = 0
    # make sure that data size and batch are not divisible
    while iters % batch_size == 0:
        # NOTE(review): incrementing this local `batch_size` cannot affect the
        # pipelines built by `creator`, which captures the caller's own
        # batch_size variable — confirm whether this adjustment loop actually
        # changes anything beyond forcing a second pipeline build.
        while iters != 0 and iters % batch_size == 0:
            batch_size += 1
        pipes = [creator(gpu) for gpu in range(num_gpus)]
        [pipe.build() for pipe in pipes]
        iters = pipes[0].epoch_size("Reader")
    iters = iters // num_gpus
    return pipes, iters
def test_pad_last_batch_epoch_size():
    """With pad_last_batch the reported epoch size must equal the reference
    size rounded up to a multiple of the number of shards."""
    def make_pipe(num_gpus):
        pipe = COCOReaderPipeline(batch_size=10,
                                  num_threads=4,
                                  shard_id=0,
                                  num_gpus=num_gpus,
                                  data_paths=datasets[0],
                                  random_shuffle=True,
                                  stick_to_shard=False,
                                  shuffle_after_epoch=False,
                                  pad_last_batch=True)
        pipe.build()
        return pipe

    reference_size = make_pipe(1).epoch_size("Reader")
    for num_gpus in range(1, 10):
        size = make_pipe(num_gpus).epoch_size("Reader")
        print(reference_size, size, num_gpus)
        assert size == int(math.ceil(reference_size * 1.0 / num_gpus)) * num_gpus
def test_pad_last_batch():
    """Padded last batches must consist of copies of the final real sample,
    in consecutive epochs as well."""
    num_gpus = 1
    batch_size = 100
    pipes, iters = create_pipeline(lambda gpu: COCOReaderPipeline(batch_size=batch_size,
                                                                  num_threads=4,
                                                                  shard_id=gpu,
                                                                  num_gpus=num_gpus,
                                                                  data_paths=datasets[0],
                                                                  random_shuffle=True,
                                                                  stick_to_shard=False,
                                                                  shuffle_after_epoch=False,
                                                                  pad_last_batch=True),
                                   batch_size, num_gpus)
    epoch_ids, _, epochs_run = gather_ids(pipes, batch_size=batch_size)
    epoch_ids = np.concatenate(epoch_ids)
    # Number of padding samples appended to fill up the last batch.
    remainder = int(math.ceil(iters * 1.0 / batch_size)) * batch_size - iters
    # The padded tail must repeat a single sample.
    mirrored = epoch_ids[-remainder - 1:]
    print(iters, remainder, set(mirrored), epoch_ids)
    assert len(set(mirrored)) == 1
    assert len(epoch_ids) != len(set(epoch_ids))
    # The same must hold for the following epoch.
    next_epoch_ids, _, _ = gather_ids(pipes, epochs_run, batch_size=batch_size)
    next_epoch_ids = np.concatenate(next_epoch_ids)
    mirrored = next_epoch_ids[-remainder - 1:]
    print(set(mirrored))
    assert len(set(mirrored)) == 1
    assert len(next_epoch_ids) != len(set(next_epoch_ids))
|
DALI-main
|
dali/test/python/reader/test_shuffling.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from glob import glob
import math
import nvidia.dali as dali
from test_utils import compare_pipelines, get_dali_extra_path
from nose_utils import assert_raises
from nose.tools import assert_equal
from webdataset_base import (generate_temp_extract, generate_temp_index_file,
webdataset_raw_pipeline, file_reader_pipeline)
from webdataset_base import test_batch_size # noqa:F401, this is a parameter used in tests
def test_return_empty():
    """Missing components with behavior="empty" must come back as empty tensors."""
    num_samples = 1000
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/missing.tar")
    index_file = generate_temp_index_file(tar_file_path)
    extract_dir = generate_temp_extract(tar_file_path)

    def sample_number(path):
        # Numeric stem of "<dir>/<number>.<ext>".
        return int(path[path.rfind("/") + 1 : path.rfind(".")])  # noqa: 203

    equivalent_files = sorted(glob(extract_dir.name + "/*"), key=sample_number)
    wds_pipe = webdataset_raw_pipeline(
        tar_file_path,
        index_file.name,
        ["jpg", "txt"],
        missing_component_behavior="empty",
        batch_size=test_batch_size,
        device_id=0,
        num_threads=1,
    )
    ref_pipe = file_reader_pipeline(equivalent_files, ["jpg", []],
                                    batch_size=test_batch_size, device_id=0, num_threads=1)
    compare_pipelines(wds_pipe, ref_pipe, test_batch_size,
                      math.ceil(num_samples / test_batch_size))
def test_skip_sample():
    """Samples with a missing component must be skipped with behavior="skip"."""
    num_samples = 500
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/missing.tar")
    index_file = generate_temp_index_file(tar_file_path)
    extract_dir = generate_temp_extract(tar_file_path)

    def sample_number(path):
        # Numeric stem of "<dir>/<number>.<ext>".
        return int(path[path.rfind("/") + 1 : path.rfind(".")])  # noqa: 203

    equivalent_files = [path
                        for path in sorted(glob(extract_dir.name + "/*"), key=sample_number)
                        if sample_number(path) < 2500]
    wds_kwargs = dict(missing_component_behavior="skip",
                      batch_size=test_batch_size, device_id=0, num_threads=1)
    compare_pipelines(
        webdataset_raw_pipeline(tar_file_path, index_file.name, ["jpg", "cls"], **wds_kwargs),
        file_reader_pipeline(equivalent_files, ["jpg", "cls"],
                             batch_size=test_batch_size, device_id=0, num_threads=1),
        test_batch_size,
        math.ceil(num_samples / test_batch_size),
    )
    # The reader must also report the reduced epoch size.
    wds_pipeline = webdataset_raw_pipeline(tar_file_path, index_file.name, ["jpg", "cls"],
                                           **wds_kwargs)
    wds_pipeline.build()
    assert_equal(list(wds_pipeline.epoch_size().values())[0], num_samples)
def test_raise_error_on_missing():
    """behavior="error" must fail the build when a sample lacks a component."""
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/missing.tar")
    index_file = generate_temp_index_file(tar_file_path)
    pipeline = webdataset_raw_pipeline(tar_file_path,
                                       index_file.name,
                                       ["jpg", "cls"],
                                       missing_component_behavior="error",
                                       batch_size=test_batch_size,
                                       device_id=0,
                                       num_threads=1)
    assert_raises(RuntimeError, pipeline.build, glob="Underful sample detected")
def test_different_components():
    """Alternative extensions ("txt;cls") must match whichever component exists."""
    num_samples = 1000
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/scrambled.tar")
    index_file = generate_temp_index_file(tar_file_path)
    extract_dir = generate_temp_extract(tar_file_path)

    def sample_number(path):
        # Numeric stem of "<dir>/<number>.<ext>".
        return int(path[path.rfind("/") + 1 : path.rfind(".")])  # noqa: 203

    equivalent_files = sorted(glob(extract_dir.name + "/*"), key=sample_number)
    compare_pipelines(
        webdataset_raw_pipeline(tar_file_path, index_file.name, ["jpg", "txt;cls"],
                                batch_size=test_batch_size, device_id=0, num_threads=1),
        file_reader_pipeline(equivalent_files, ["jpg", {"txt", "cls"}],
                             batch_size=test_batch_size, device_id=0, num_threads=1),
        test_batch_size,
        math.ceil(num_samples / test_batch_size),
    )
def test_dtypes():
    """Typed webdataset components must decode to the requested dtypes/values."""
    num_samples = 100
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/sample-tar/dtypes.tar")
    index_file = generate_temp_index_file(tar_file_path)
    wds_pipeline = webdataset_raw_pipeline(
        tar_file_path,
        index_file.name,
        ["float16", "int32", "float64"],
        dtypes=[dali.types.FLOAT16, dali.types.INT32, dali.types.FLOAT64],
        batch_size=test_batch_size,
        device_id=0,
        num_threads=1,
    )
    wds_pipeline.build()
    f16 = i32 = f64 = None
    for sample_idx in range(num_samples):
        pos = sample_idx % test_batch_size
        if pos == 0:
            f16, i32, f64 = wds_pipeline.run()
        # Every component of sample k holds ten copies of the value k.
        assert (f16.as_array()[pos] == [float(sample_idx)] * 10).all()
        assert (i32.as_array()[pos] == [int(sample_idx)] * 10).all()
        assert (f64.as_array()[pos] == [float(sample_idx)] * 10).all()
def test_wds_sharding():
    """Multiple tars must read identically to the concatenated extracted files."""
    num_samples = 3000
    tar_file_paths = [
        os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar"),
        os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-1.tar"),
        os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-2.tar"),
    ]
    index_files = [generate_temp_index_file(path) for path in tar_file_paths]
    extract_dirs = [generate_temp_extract(path) for path in tar_file_paths]

    def sample_number(path):
        # Numeric stem of "<dir>/<number>.<ext>".
        return int(path[path.rfind("/") + 1 : path.rfind(".")])  # noqa: 203

    equivalent_files = []
    for extract_dir in extract_dirs:
        equivalent_files.extend(sorted(glob(extract_dir.name + "/*"), key=sample_number))
    compare_pipelines(
        webdataset_raw_pipeline(tar_file_paths,
                                [index_file.name for index_file in index_files],
                                ["jpg", "cls"],
                                batch_size=test_batch_size, device_id=0, num_threads=1),
        file_reader_pipeline(equivalent_files, ["jpg", "cls"],
                             batch_size=test_batch_size, device_id=0, num_threads=1),
        test_batch_size,
        math.ceil(num_samples / test_batch_size),
    )
def test_sharding():
    """Each of 100 shards must match the corresponding slice of extracted files."""
    num_samples = 1000
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
    index_file = generate_temp_index_file(tar_file_path)
    extract_dir = generate_temp_extract(tar_file_path)

    def sample_number(path):
        # Numeric stem of "<dir>/<number>.<ext>".
        return int(path[path.rfind("/") + 1 : path.rfind(".")])  # noqa: 203

    equivalent_files = sorted(glob(extract_dir.name + "/*"), key=sample_number)
    num_shards = 100
    for shard_id in range(num_shards):
        shard_kwargs = dict(num_shards=num_shards, shard_id=shard_id,
                            batch_size=test_batch_size, device_id=0, num_threads=1)
        compare_pipelines(
            webdataset_raw_pipeline(tar_file_path, index_file.name, ["jpg", "cls"],
                                    **shard_kwargs),
            file_reader_pipeline(equivalent_files, ["jpg", "cls"], **shard_kwargs),
            test_batch_size,
            math.ceil(num_samples / num_shards / test_batch_size) * 2,
        )
def test_pax_format():
    """A pax-format tar without an index must match the indexed ustar tar."""
    num_samples = 1000
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
    pax_tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/pax/devel-0.tar")
    index_file = generate_temp_index_file(tar_file_path)
    num_shards = 100
    for shard_id in range(num_shards):
        shard_kwargs = dict(num_shards=num_shards, shard_id=shard_id,
                            batch_size=test_batch_size, device_id=0, num_threads=1)
        compare_pipelines(
            webdataset_raw_pipeline(tar_file_path, index_file.name, ["jpg", "cls"],
                                    **shard_kwargs),
            webdataset_raw_pipeline(pax_tar_file_path, None, ext=["jpg", "cls"],
                                    **shard_kwargs),
            test_batch_size,
            math.ceil(num_samples / num_shards / test_batch_size) * 2,
        )
def test_case_sensitive_container_format():
    """By default extensions match case-sensitively, so a container with
    differently-cased extensions must trigger an underful-sample error."""
    num_samples = 1000
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
    case_insensitive_tar_file_path = os.path.join(get_dali_extra_path(),
                                                  "db/webdataset/case_insensitive/devel-0.tar")
    index_file = generate_temp_index_file(tar_file_path)
    num_shards = 100
    with assert_raises(RuntimeError, glob="Underful sample detected at"):
        for shard_id in range(num_shards):
            shard_kwargs = dict(num_shards=num_shards, shard_id=shard_id,
                                batch_size=test_batch_size, device_id=0, num_threads=1)
            compare_pipelines(
                webdataset_raw_pipeline(tar_file_path, index_file.name, ["jpg", "cls"],
                                        **shard_kwargs),
                webdataset_raw_pipeline(case_insensitive_tar_file_path, None,
                                        ext=["jpg", "cls"],
                                        missing_component_behavior="error",
                                        **shard_kwargs),
                test_batch_size,
                math.ceil(num_samples / num_shards / test_batch_size) * 2,
            )
def test_case_sensitive_arg_format():
    """By default extension arguments match case-sensitively, so "Jpg" must
    trigger an underful-sample error."""
    num_samples = 1000
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
    index_file = generate_temp_index_file(tar_file_path)
    num_shards = 100
    with assert_raises(RuntimeError, glob="Underful sample detected at"):
        for shard_id in range(num_shards):
            shard_kwargs = dict(num_shards=num_shards, shard_id=shard_id,
                                batch_size=test_batch_size, device_id=0, num_threads=1)
            compare_pipelines(
                webdataset_raw_pipeline(tar_file_path, index_file.name, ["jpg", "cls"],
                                        **shard_kwargs),
                webdataset_raw_pipeline(tar_file_path, index_file.name,
                                        ext=["Jpg", "cls"],
                                        missing_component_behavior="error",
                                        **shard_kwargs),
                test_batch_size,
                math.ceil(num_samples / num_shards / test_batch_size) * 2,
            )
def test_case_insensitive_container_format():
    """With case_sensitive_extensions=False a differently-cased container
    must read identically to the reference one."""
    num_samples = 1000
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
    case_insensitive_tar_file_path = os.path.join(get_dali_extra_path(),
                                                  "db/webdataset/case_insensitive/devel-0.tar")
    index_file = generate_temp_index_file(tar_file_path)
    num_shards = 100
    for shard_id in range(num_shards):
        shard_kwargs = dict(num_shards=num_shards, shard_id=shard_id,
                            batch_size=test_batch_size, device_id=0, num_threads=1)
        compare_pipelines(
            webdataset_raw_pipeline(tar_file_path, index_file.name, ["jpg", "cls"],
                                    **shard_kwargs),
            webdataset_raw_pipeline(case_insensitive_tar_file_path, None,
                                    ext=["jpg", "cls"],
                                    case_sensitive_extensions=False,
                                    **shard_kwargs),
            test_batch_size,
            math.ceil(num_samples / num_shards / test_batch_size) * 2,
        )
def test_case_insensitive_arg_format():
    """With case_sensitive_extensions=False a differently-cased extension
    argument ("Jpg") must read identically to the reference."""
    num_samples = 1000
    tar_file_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
    index_file = generate_temp_index_file(tar_file_path)
    num_shards = 100
    for shard_id in range(num_shards):
        shard_kwargs = dict(num_shards=num_shards, shard_id=shard_id,
                            batch_size=test_batch_size, device_id=0, num_threads=1)
        compare_pipelines(
            webdataset_raw_pipeline(tar_file_path, index_file.name, ["jpg", "cls"],
                                    **shard_kwargs),
            webdataset_raw_pipeline(tar_file_path, index_file.name,
                                    ext=["Jpg", "cls"],
                                    case_sensitive_extensions=False,
                                    **shard_kwargs),
            test_batch_size,
            math.ceil(num_samples / num_shards / test_batch_size) * 2,
        )
def test_index_generation():
    """With no index files provided the reader must generate indices itself
    and still read identically to the extracted files."""
    num_samples = 3000
    tar_file_paths = [
        os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar"),
        os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-1.tar"),
        os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-2.tar"),
    ]
    extract_dirs = [generate_temp_extract(path) for path in tar_file_paths]

    def sample_number(path):
        # Numeric stem of "<dir>/<number>.<ext>".
        return int(path[path.rfind("/") + 1 : path.rfind(".")])  # noqa: 203

    equivalent_files = []
    for extract_dir in extract_dirs:
        equivalent_files.extend(sorted(glob(extract_dir.name + "/*"), key=sample_number))
    num_shards = 100
    for shard_id in range(num_shards):
        shard_kwargs = dict(num_shards=num_shards, shard_id=shard_id,
                            batch_size=test_batch_size, device_id=0, num_threads=1)
        compare_pipelines(
            webdataset_raw_pipeline(tar_file_paths, [], ["jpg", "cls"],
                                    missing_component_behavior="error",
                                    **shard_kwargs),
            file_reader_pipeline(equivalent_files, ["jpg", "cls"], **shard_kwargs),
            test_batch_size,
            math.ceil(num_samples / num_shards / test_batch_size) * 2,
        )
|
DALI-main
|
dali/test/python/reader/test_webdataset_requirements.py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import Pipeline, pipeline_def
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.types as types
from numpy.testing import assert_array_equal
import os
from test_utils import compare_pipelines
from test_utils import get_dali_extra_path
test_data_root = get_dali_extra_path()
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
c2lmdb_db_folder = os.path.join(test_data_root, 'db', 'c2lmdb')
c2lmdb_no_label_db_folder = os.path.join(test_data_root, 'db', 'c2lmdb_no_label')
class CaffeReaderPipeline(Pipeline):
    """Reads (image, label) pairs from a Caffe LMDB and crops images on CPU."""

    def __init__(self, path, batch_size, num_threads=1, device_id=0, num_gpus=1):
        super(CaffeReaderPipeline, self).__init__(batch_size, num_threads, device_id)
        # Shard the database across GPUs; each pipeline reads its own shard.
        self.input = ops.readers.Caffe(path=path, shard_id=device_id, num_shards=num_gpus)
        # Fixed 224x224 crop at a fixed relative position keeps the outputs
        # deterministic, so two pipelines can be compared sample by sample.
        self.decode = ops.decoders.ImageCrop(
            device="cpu",
            crop=(224, 224),
            crop_pos_x=0.3,
            crop_pos_y=0.2,
            output_type=types.RGB,
        )

    def define_graph(self):
        encoded, labels = self.input(name="Reader")
        decoded = self.decode(encoded)
        return decoded, labels
def check_reader_path_vs_paths(paths, batch_size1, batch_size2, num_threads1, num_threads2):
    """
    test: compare caffe_db_folder with [caffe_db_folder] and [caffe_db_folder, caffe_db_folder],
    with different batch_size and num_threads
    """
    pipe1 = CaffeReaderPipeline(caffe_db_folder, batch_size1, num_threads1)
    pipe1.build()
    pipe2 = CaffeReaderPipeline(paths, batch_size2, num_threads2)
    pipe2.build()

    def sample_stream(pipe):
        # Flatten batches into an endless stream of (image, label) samples.
        while True:
            outputs = pipe.run()
            for idx in range(len(outputs[0])):
                yield outputs[0].at(idx), outputs[1].at(idx)

    stream1 = sample_stream(pipe1)
    stream2 = sample_stream(pipe2)
    num_entries = 100
    for _ in range(num_entries):
        image1, label1 = next(stream1)
        image2, label2 = next(stream2)
        assert_array_equal(image1, image2)
        assert_array_equal(label1, label2)
def test_reader_path_vs_paths():
    """Yield comparison cases: single-path reader vs list-of-paths reader."""
    path_variants = [[caffe_db_folder], [caffe_db_folder, caffe_db_folder]]
    for paths in path_variants:
        for batch_size1 in [1]:
            for batch_size2 in [1, 16, 31]:
                for num_threads1 in [1]:
                    for num_threads2 in [1, 2]:
                        yield (check_reader_path_vs_paths, paths,
                               batch_size1, batch_size2, num_threads1, num_threads2)
# Batch size shared by the reader-alias comparison tests below.
batch_size_alias_test = 64
@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def caffe_pipe(caffe_op, path):
    """Minimal pipeline reading (data, label) pairs with the given Caffe reader op."""
    data, label = caffe_op(path=path)
    return data, label
def test_caffe_reader_alias():
    """fn.readers.caffe must behave identically to its legacy alias fn.caffe_reader."""
    new_pipe, legacy_pipe = (caffe_pipe(reader_op, caffe_db_folder)
                             for reader_op in (fn.readers.caffe, fn.caffe_reader))
    compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 50)
@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def caffe2_pipe(caffe2_op, path, label_type):
    """Pipeline reading from a Caffe2 LMDB with the given reader op.

    For label_type == 4 the reader yields only the data tensor (check_caffe2
    pairs this value with the no-label database); otherwise a (data, label)
    pair is returned.
    """
    if label_type == 4:
        data = caffe2_op(path=path, label_type=label_type)
        return data
    else:
        data, label = caffe2_op(path=path, label_type=label_type)
        return data, label
def check_caffe2(label_type):
    """Compare fn.readers.caffe2 with its legacy alias for one label type."""
    # label_type 4 corresponds to the database that stores no labels.
    db_path = c2lmdb_db_folder if label_type != 4 else c2lmdb_no_label_db_folder
    new_pipe = caffe2_pipe(fn.readers.caffe2, db_path, label_type)
    legacy_pipe = caffe2_pipe(fn.caffe2_reader, db_path, label_type)
    compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 50)
def test_caffe2_reader_alias():
    """Run the caffe2 alias check for labeled (0) and label-less (4) databases."""
    yield from ((check_caffe2, label_type) for label_type in (0, 4))
|
DALI-main
|
dali/test/python/reader/test_caffe.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import numpy as np
import nvidia.dali.fn as fn
import os
import random
import tempfile
from nvidia.dali import Pipeline, pipeline_def
from nose_utils import assert_raises
from test_utils import compare_pipelines
def ref_contents(path):
    """Return the reference text stored in the test file at `path`.

    Uses os.path.basename instead of manual '/' slicing, so the helper also
    works with OS-specific path separators; for '/'-separated paths the
    result is unchanged.
    """
    return "Contents of " + os.path.basename(path) + ".\n"
def populate(root, files):
    """Create each file under `root`, filled with its reference contents."""
    for name in files:
        target = os.path.join(root, name)
        with open(target, "w") as out:
            out.write(ref_contents(name))
# Module-scope fixtures created in setUpModule() and torn down in
# tearDownModule().
g_root = None  # path of the temporary data directory
g_tmpdir = None  # the TemporaryDirectory object owning g_root
g_files = None  # list of file names created under g_root
def setUpModule():
    """Create a temporary directory with test files (names contain a space)."""
    global g_root
    global g_files
    global g_tmpdir
    g_tmpdir = tempfile.TemporaryDirectory()
    # Use the documented .name attribute instead of calling __enter__()
    # directly; the directory is removed explicitly in tearDownModule().
    g_root = g_tmpdir.name
    g_files = [str(i) + ' x.dat' for i in range(10)]  # name with a space in the middle!
    populate(g_root, g_files)
def tearDownModule():
    """Remove the temporary directory and reset the module-level fixtures."""
    global g_root
    global g_files
    global g_tmpdir
    # cleanup() is the public API; __exit__(None, None, None) did the same.
    g_tmpdir.cleanup()
    g_tmpdir = None
    g_root = None
    g_files = None
def _test_reader_files_arg(use_root, use_labels, shuffle):
    """Exercise fn.readers.file with the `files` argument.

    Depending on the flags, file names are given relative to `file_root` or
    as absolute paths, with or without explicit labels.  Each output sample
    is checked against the reference contents written by populate().
    """
    root = g_root
    fnames = g_files
    if not use_root:
        # Absolute paths: bake the root into each name and drop file_root.
        fnames = [os.path.join(root, f) for f in fnames]
        root = None
    lbl = None
    if use_labels:
        # Custom labels offset by 10000, distinguishing them from the
        # default index-based labels in the check below.
        lbl = [10000 + i for i in range(len(fnames))]
    batch_size = 3
    pipe = Pipeline(batch_size, 1, 0)
    files, labels = fn.readers.file(file_root=root, files=fnames, labels=lbl,
                                    random_shuffle=shuffle)
    pipe.set_outputs(files, labels)
    pipe.build()
    # Run a bit more than one epoch so wrap-around is exercised too.
    num_iters = (len(fnames) + 2 * batch_size) // batch_size
    for i in range(num_iters):
        out_f, out_l = pipe.run()
        for j in range(batch_size):
            contents = bytes(out_f.at(j)).decode('utf-8')
            label = out_l.at(j)[0]
            index = label - 10000 if use_labels else label
            assert contents == ref_contents(fnames[index])
def test_file_reader():
    """Generate all flag combinations for the `files`-argument reader test."""
    flags = (False, True)
    for use_root in flags:
        for use_labels in flags:
            for shuffle in flags:
                yield _test_reader_files_arg, use_root, use_labels, shuffle
def test_file_reader_relpath():
    """Read files given as paths relative to the current working directory."""
    batch_size = 3
    rel_root = os.path.relpath(g_root, os.getcwd())
    fnames = [os.path.join(rel_root, f) for f in g_files]
    pipe = Pipeline(batch_size, 1, 0)
    files, labels = fn.readers.file(files=fnames, random_shuffle=True)
    pipe.set_outputs(files, labels)
    pipe.build()
    # A bit more than one epoch, to also exercise wrap-around.
    num_iters = (len(fnames) + 2 * batch_size) // batch_size
    for i in range(num_iters):
        out_f, out_l = pipe.run()
        for j in range(batch_size):
            contents = bytes(out_f.at(j)).decode('utf-8')
            # Without explicit labels, the label is the file's index.
            index = out_l.at(j)[0]
            assert contents == ref_contents(fnames[index])
def test_file_reader_relpath_file_list():
    """Read files through a file_list whose labels run downwards from 10000."""
    batch_size = 3
    fnames = g_files
    list_file = os.path.join(g_root, "list.txt")
    with open(list_file, "w") as f:
        for i, name in enumerate(fnames):
            # Label 10000 - i lets the check below recover the file index.
            f.write("{0} {1}\n".format(name, 10000 - i))
    pipe = Pipeline(batch_size, 1, 0)
    files, labels = fn.readers.file(file_list=list_file, random_shuffle=True)
    pipe.set_outputs(files, labels)
    pipe.build()
    # A bit more than one epoch, to also exercise wrap-around.
    num_iters = (len(fnames) + 2 * batch_size) // batch_size
    for i in range(num_iters):
        out_f, out_l = pipe.run()
        for j in range(batch_size):
            contents = bytes(out_f.at(j)).decode('utf-8')
            label = out_l.at(j)[0]
            index = 10000 - label
            assert contents == ref_contents(fnames[index])
def _test_file_reader_filter(filters, glob_filters, batch_size, num_threads, subpath,
                             case_sensitive_filter):
    """Compare fn.readers.file(file_filters=...) against a Python glob() scan.

    `filters` is passed to the reader, while `glob_filters` is the equivalent
    set of patterns for the reference glob() (they differ only for
    case-insensitive matching).  Local loop variables are named so they do
    not shadow the `filter`, `file` and `dir` builtins.
    """
    pipe = Pipeline(batch_size, num_threads, 0)
    root = os.path.join(os.environ['DALI_EXTRA_PATH'], subpath)
    files, labels = fn.readers.file(file_root=root, file_filters=filters,
                                    case_sensitive_filter=case_sensitive_filter)
    pipe.set_outputs(files, labels)
    pipe.build()
    # Build the reference (label, basename, full path) set; labels follow the
    # sorted order of subdirectories, matching the reader's labeling.
    fnames = set()
    for label, subdir in enumerate(sorted(next(os.walk(root))[1])):
        for pattern in glob_filters:
            for fpath in glob.glob(os.path.join(root, subdir, pattern)):
                fnames.add((label, fpath.split('/')[-1], fpath))
    fnames = sorted(fnames)
    for i in range(len(fnames) // batch_size):
        out_f, _ = pipe.run()
        for j in range(batch_size):
            with open(fnames[i * batch_size + j][2], 'rb') as ref_file:
                contents = np.array(list(ref_file.read()))
            assert all(contents == out_f.at(j))
def test_file_reader_filters():
    """Generate filter-matching test cases, including case-sensitivity checks."""
    filter_sets = (
        ['*.jpg'],
        ['*.jpg', '*.png', '*.jpeg'],
        ['dog*.jpg', 'cat*.png', '*.jpg'],
    )
    for filters in filter_sets:
        num_threads = random.choice([1, 2, 4, 8])
        batch_size = random.choice([1, 3, 10])
        yield (_test_file_reader_filter, filters, filters, batch_size, num_threads,
               'db/single/mixed', False)
    mixed_case = ['*.jPg', '*.JPg']
    yield (_test_file_reader_filter, mixed_case, mixed_case, 3, 1,
           'db/single/case_sensitive', True)
    every_case = ['*.jpg', '*.jpG', '*.jPg', '*.jPG', '*.Jpg', '*.JpG', '*.JPg', '*.JPG']
    yield (_test_file_reader_filter, ['*.JPG'], every_case, 3, 1,
           'db/single/case_sensitive', False)
# Batch size shared by the reader-alias comparison tests below.
batch_size_alias_test = 64
@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def file_pipe(file_op, file_list):
    """Minimal pipeline reading (file, label) pairs with the given reader op."""
    files, labels = file_op(file_list=file_list)
    return files, labels
def test_file_reader_alias():
    """fn.readers.file must match its legacy alias fn.file_reader on a file list."""
    file_list = os.path.join(g_root, "list.txt")
    with open(file_list, "w") as f:
        for i, name in enumerate(g_files):
            f.write("{0} {1}\n".format(name, 10000 - i))
    new_pipe = file_pipe(fn.readers.file, file_list)
    legacy_pipe = file_pipe(fn.file_reader, file_list)
    compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 50)
def test_invalid_number_of_shards():
    """Requesting more shards than there are samples must fail at build time."""
    @pipeline_def(batch_size=1, device_id=0, num_threads=4)
    def get_test_pipe():
        root = os.path.join(os.environ['DALI_EXTRA_PATH'], 'db/single/mixed')
        # 9999 shards vastly exceeds the sample count of db/single/mixed.
        files, labels = fn.readers.file(file_root=root, shard_id=0, num_shards=9999)
        return files, labels
    pipe = get_test_pipe()
    assert_raises(RuntimeError, pipe.build,
                  glob="The number of input samples: *, needs to be at least equal to the requested number of shards:*.")  # noqa: E501
|
DALI-main
|
dali/test/python/reader/test_file.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
from glob import glob
import webdataset_base as base
from test_utils import compare_pipelines, get_dali_extra_path
def cross_check(dont_use_mmap, batch_size, num_shards, shard_id, skip_cached_images, pad_last_batch,
                stick_to_shard, ):
    """Compare the webdataset reader with a file reader under one option set.

    The same archive is repeated four times and read with the given
    sharding/caching/padding options; a file reader over the extracted
    contents must produce identical batches.
    """
    num_multiplications = 4
    num_samples = 20 * num_multiplications
    tar_file_paths = [os.path.join(get_dali_extra_path(),
                                   "db/webdataset/sample-tar/cross.tar")] * num_multiplications
    index_files = [base.generate_temp_index_file(tar_file_path) for tar_file_path in tar_file_paths]
    extract_dirs = [base.generate_temp_extract(tar_file_path) for tar_file_path in tar_file_paths]
    # Reference file list: all extracted files, per archive, sorted by the
    # numeric sample index (full path as tie-breaker).
    equivalent_files = sum(
        (
            sorted(
                glob(extract_dir.name + "/*"),
                key=lambda s: (int(s[s.rfind("/") + 1: s.find(".")]), s),
            )
            for extract_dir in extract_dirs
        ),
        [],
    )
    compare_pipelines(
        base.webdataset_raw_pipeline(
            tar_file_paths,
            [index_file.name for index_file in index_files],
            # Repeated component names exercise reading the same component
            # more than once per sample.
            ["a.a;a.b;a.a;a.b", "b.a;b.b;b.a;b.b"],
            batch_size=batch_size,
            device_id=0,
            num_threads=10,
            dont_use_mmap=dont_use_mmap,
            num_shards=num_shards,
            shard_id=shard_id,
            prefetch_queue_depth=8,
            skip_cached_images=skip_cached_images,
            pad_last_batch=pad_last_batch,
            stick_to_shard=stick_to_shard,
        ),
        base.file_reader_pipeline(
            equivalent_files,
            ["a.a", "b.a"],
            batch_size=batch_size,
            device_id=0,
            num_threads=10,
            dont_use_mmap=True,
            num_shards=num_shards,
            shard_id=shard_id,
            skip_cached_images=skip_cached_images,
            pad_last_batch=pad_last_batch,
            stick_to_shard=stick_to_shard,
        ),
        batch_size,
        # NOTE(review): the iteration count divides by base.test_batch_size
        # rather than the local batch_size -- presumably intentional (bounds
        # the runtime); confirm.
        math.ceil(num_samples / base.test_batch_size),
    )
def test_cross_check():
    """Generate cross_check scenarios over combinations of reader options.

    batch_size == 1 together with pad_last_batch is skipped, matching the
    filter clause of the original comprehension.
    """
    for dont_use_mmap in (False, True):
        for stick_to_shard in (False, True):
            for pad_last_batch in (False, True):
                for skip_cached_images in (False, True):
                    for batch_size in (1, 8):
                        if batch_size == 1 and pad_last_batch:
                            continue
                        for num_shards in (1, 80):
                            # A set deduplicates shard 0 when num_shards == 1.
                            for shard_id in {0, num_shards - 1}:
                                yield (cross_check, dont_use_mmap, batch_size,
                                       num_shards, shard_id, skip_cached_images,
                                       pad_last_batch, stick_to_shard)
|
DALI-main
|
dali/test/python/reader/test_webdataset_big.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import Pipeline, pipeline_def
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import nvidia.dali.tfrecord as tfrec
import os.path
import tempfile
import numpy as np
from test_utils import compare_pipelines, get_dali_extra_path
from nose_utils import assert_raises
from nose2.tools import cartesian_params
from nose import SkipTest
def skip_second(src, dst):
    """Copy `src` to `dst`, keeping only every other line (the 1st, 3rd, ...)."""
    with open(src, 'r') as src_file, open(dst, 'w') as dst_file:
        for line_no, line in enumerate(src_file):
            if line_no % 2 == 0:
                dst_file.write(line)
def test_tfrecord():
    """Reading with a subsampled index file must match the full-index reader.

    The index is rewritten with every second entry dropped, so the pipeline
    using it sees half of the samples; each must equal every other sample
    produced with the original index (batch_size is 1 in both pipelines).
    """
    class TFRecordPipeline(Pipeline):
        # Minimal TFRecord pipeline returning only the encoded image bytes.
        def __init__(self, batch_size, num_threads, device_id, num_gpus, data, data_idx):
            super(TFRecordPipeline, self).__init__(batch_size, num_threads, device_id)
            self.input = ops.readers.TFRecord(
                path=data,
                index_path=data_idx,
                features={
                    "image/encoded": tfrec.FixedLenFeature((), tfrec.string, ""),
                    "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1)})
        def define_graph(self):
            inputs = self.input(name="Reader")
            images = inputs["image/encoded"]
            return images
    tfrecord = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train')
    tfrecord_idx_org = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train.idx')
    tfrecord_idx = "tfr_train.idx"
    idx_files_dir = tempfile.TemporaryDirectory()
    idx_file = os.path.join(idx_files_dir.name, tfrecord_idx)
    skip_second(tfrecord_idx_org, idx_file)
    pipe = TFRecordPipeline(1, 1, 0, 1, tfrecord, idx_file)
    pipe_org = TFRecordPipeline(1, 1, 0, 1, tfrecord, tfrecord_idx_org)
    pipe.build()
    pipe_org.build()
    iters = pipe.epoch_size("Reader")
    for _ in range(iters):
        out = pipe.run()
        out_ref = pipe_org.run()
        for a, b in zip(out, out_ref):
            assert np.array_equal(a.as_array(), b.as_array())
        # Advance the reference pipeline past the sample that was dropped
        # from the subsampled index.
        _ = pipe_org.run()
def test_tfrecord_odirect():
    """Reading with O_DIRECT (no mmap) must match the default mmap reader."""
    batch_size = 16
    @pipeline_def(batch_size=batch_size, device_id=0, num_threads=4)
    def tfrecord_pipe(path, index_path, dont_use_mmap, use_o_direct):
        # Only labels are returned; image decoding is irrelevant to the
        # I/O path under test.
        input = fn.readers.tfrecord(
            path=path,
            index_path=index_path,
            dont_use_mmap=dont_use_mmap,
            use_o_direct=use_o_direct,
            features={
                "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1)},
            name="Reader")
        return input["image/class/label"]
    tfrecord = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train')
    tfrecord_idx = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train.idx')
    pipe = tfrecord_pipe(tfrecord, tfrecord_idx, True, True)
    pipe_ref = tfrecord_pipe(tfrecord, tfrecord_idx, False, False)
    pipe.build()
    pipe_ref.build()
    # One extra iteration to cross the epoch boundary.
    iters = (pipe.epoch_size("Reader") + batch_size) // batch_size
    for _ in range(iters):
        out = pipe.run()
        out_ref = pipe_ref.run()
        for a, b in zip(out, out_ref):
            assert np.array_equal(a.as_array(), b.as_array())
@cartesian_params(((1, 2, 1), (3, 1, 2)),
                  (True, False),
                  (True, False))
def test_tfrecord_pad_last_batch(batch_description, dont_use_mmap, use_o_direct):
    """Check last-batch padding with a truncated index file.

    batch_description is (num_samples, batch_size, num_shards); the index is
    cut down to num_samples entries and the O_DIRECT/mmap reader must match
    a plain mmap reference reader.
    """
    if not dont_use_mmap and use_o_direct:
        raise SkipTest("Cannot use O_DIRECT with mmap")
    num_samples, batch_size, num_shards = batch_description
    @pipeline_def(batch_size=batch_size, device_id=0, num_threads=4)
    def tfrecord_pipe(path, index_path, dont_use_mmap, use_o_direct):
        input = fn.readers.tfrecord(
            path=path,
            index_path=index_path,
            num_shards=num_shards,
            dont_use_mmap=dont_use_mmap,
            use_o_direct=use_o_direct,
            features={
                "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1)},
            name="Reader")
        return input["image/class/label"]
    tfrecord = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train')
    tfrecord_idx = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train.idx')
    idx_files_dir = tempfile.TemporaryDirectory()
    recordio_idx = "rio_train.idx"
    idx_file = os.path.join(idx_files_dir.name, recordio_idx)
    def leave_only_N(src, dst, n):
        # Copy only the first n index entries, truncating the dataset.
        with open(src, 'r') as tmp_f:
            with open(dst, 'w') as f:
                for i, x in enumerate(tmp_f):
                    if i == n:
                        break
                    f.write(x)
    leave_only_N(tfrecord_idx, idx_file, num_samples)
    pipe = tfrecord_pipe(tfrecord, idx_file, dont_use_mmap, use_o_direct)
    pipe_ref = tfrecord_pipe(tfrecord, idx_file, False, False)
    pipe.build()
    pipe_ref.build()
    # One extra iteration to cross the epoch boundary and exercise padding.
    iters = (pipe.epoch_size("Reader") + batch_size) // batch_size
    for _ in range(iters):
        out = pipe.run()
        out_ref = pipe_ref.run()
        for a, b in zip(out, out_ref):
            assert np.array_equal(a.as_array(), b.as_array())
def test_recordio():
    """Reading with a subsampled RecordIO index must match the full index.

    Analogous to test_tfrecord: every second index entry is dropped, so the
    reference pipeline is advanced one extra sample per iteration.
    """
    class MXNetReaderPipeline(Pipeline):
        # Minimal MXNet/RecordIO pipeline returning only the image bytes.
        def __init__(self, batch_size, num_threads, device_id, num_gpus, data, data_idx):
            super(MXNetReaderPipeline, self).__init__(batch_size, num_threads, device_id)
            self.input = ops.readers.MXNet(path=[data], index_path=[data_idx],
                                           shard_id=device_id, num_shards=num_gpus)
        def define_graph(self):
            images, _ = self.input(name="Reader")
            return images
    recordio = os.path.join(get_dali_extra_path(), 'db', 'recordio', 'train.rec')
    recordio_idx_org = os.path.join(get_dali_extra_path(), 'db', 'recordio', 'train.idx')
    recordio_idx = "rio_train.idx"
    idx_files_dir = tempfile.TemporaryDirectory()
    idx_file = os.path.join(idx_files_dir.name, recordio_idx)
    skip_second(recordio_idx_org, idx_file)
    pipe = MXNetReaderPipeline(1, 1, 0, 1, recordio, idx_file)
    pipe_org = MXNetReaderPipeline(1, 1, 0, 1, recordio, recordio_idx_org)
    pipe.build()
    pipe_org.build()
    iters = pipe.epoch_size("Reader")
    for _ in range(iters):
        out = pipe.run()
        out_ref = pipe_org.run()
        for a, b in zip(out, out_ref):
            assert np.array_equal(a.as_array(), b.as_array())
        # Skip the sample that was removed from the subsampled index.
        _ = pipe_org.run()
def test_wrong_feature_shape():
    """A FixedLenFeature with too small a declared shape must raise at run time."""
    features = {
        'image/encoded': tfrec.FixedLenFeature((), tfrec.string, ""),
        # Bounding boxes need at least 4 floats, but a scalar is declared.
        'image/object/bbox': tfrec.FixedLenFeature([], tfrec.float32, -1.0),
        'image/object/class/label': tfrec.FixedLenFeature([], tfrec.int64, -1),
    }
    test_dummy_data_path = os.path.join(get_dali_extra_path(), 'db', 'coco_dummy')
    pipe = Pipeline(1, 1, 0)
    with pipe:
        input = fn.readers.tfrecord(
            path=os.path.join(test_dummy_data_path, 'small_coco.tfrecord'),
            index_path=os.path.join(test_dummy_data_path, 'small_coco_index.idx'),
            features=features)
    pipe.set_outputs(input['image/encoded'],
                     input['image/object/class/label'],
                     input['image/object/bbox'])
    pipe.build()
    # the error is raised because FixedLenFeature is used with insufficient shape to house the input
    assert_raises(RuntimeError,
                  pipe.run,
                  glob="Error when executing CPU operator*readers*tfrecord*"
                       "Output tensor shape is too small*[]*Expected at least 4 elements")
# Batch size shared by the reader-alias comparison tests below.
batch_size_alias_test = 64
@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def mxnet_pipe(mxnet_op, path, index_path):
    """Minimal pipeline reading (data, label) pairs with the given MXNet reader op."""
    files, labels = mxnet_op(path=path, index_path=index_path)
    return files, labels
def test_mxnet_reader_alias():
    """fn.readers.mxnet must match its legacy alias fn.mxnet_reader."""
    data_root = get_dali_extra_path()
    recordio = [os.path.join(data_root, 'db', 'recordio', 'train.rec')]
    recordio_idx = [os.path.join(data_root, 'db', 'recordio', 'train.idx')]
    new_pipe, legacy_pipe = (mxnet_pipe(reader_op, recordio, recordio_idx)
                             for reader_op in (fn.readers.mxnet, fn.mxnet_reader))
    compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 50)
@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def tfrecord_pipe(tfrecord_op, path, index_path):
    """Pipeline returning encoded image bytes read with the given TFRecord op."""
    inputs = tfrecord_op(
        path=path,
        index_path=index_path,
        features={
            "image/encoded": tfrec.FixedLenFeature((), tfrec.string, ""),
            "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1)})
    return inputs["image/encoded"]
def test_tfrecord_reader_alias():
    """fn.readers.tfrecord must match its legacy alias fn.tfrecord_reader."""
    data_root = get_dali_extra_path()
    tfrecord = os.path.join(data_root, 'db', 'tfrecord', 'train')
    tfrecord_idx = os.path.join(data_root, 'db', 'tfrecord', 'train.idx')
    new_pipe, legacy_pipe = (tfrecord_pipe(reader_op, tfrecord, tfrecord_idx)
                             for reader_op in (fn.readers.tfrecord, fn.tfrecord_reader))
    compare_pipelines(new_pipe, legacy_pipe, batch_size_alias_test, 50)
@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def tfrecord_pipe_empty_fields(path, index_path):
    """Pipeline requesting two features that are absent from the records.

    Used to verify that missing features come back as empty outputs of the
    declared dtype, alongside the normally read "image/encoded" bytes.
    """
    inputs = fn.readers.tfrecord(
        path=path, index_path=index_path,
        features={"image/encoded": tfrec.FixedLenFeature((), tfrec.string, ""),
                  "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1),
                  "does/not/exists": tfrec.VarLenFeature(tfrec.int64, -1),
                  "does/not/exists/as/well": tfrec.FixedLenFeature([1], tfrec.float32, .0)})
    return inputs["image/encoded"], inputs["does/not/exists"], inputs["does/not/exists/as/well"]
def test_tfrecord_reader_alias2():
    """Missing TFRecord features must decode to empty scalars of the right dtype."""
    tfrecord = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train')
    tfrecord_idx = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train.idx')
    pipe = tfrecord_pipe_empty_fields(tfrecord, tfrecord_idx)
    pipe.build()
    encoded, missing_var, missing_fixed = pipe.run()
    # The real feature decodes to non-empty uint8 byte tensors.
    for tensor in encoded:
        data = np.array(tensor)
        assert len(data) != 0
        assert data.dtype == np.uint8
    # Both missing features decode to 0-d outputs of their declared dtypes.
    for expected_dtype, batch in ((np.int64, missing_var), (np.float32, missing_fixed)):
        for tensor in batch:
            data = np.array(tensor)
            assert len(data.shape) == 0
            assert data.dtype == expected_dtype
def test_tfrecord_reader_scalars():
    """FixedLenFeature with shape () must yield true scalar outputs."""
    test_dummy_data_path = os.path.join(get_dali_extra_path(), 'db', 'coco_dummy')
    @pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
    def tfrecord_pipe_scalars():
        data = fn.readers.tfrecord(
            path=os.path.join(test_dummy_data_path, 'small_coco.tfrecord'),
            index_path=os.path.join(test_dummy_data_path, 'small_coco_index.idx'),
            features={
                'image/height': tfrec.FixedLenFeature((), tfrec.int64, -1),
            })
        return data['image/height']
    pipe = tfrecord_pipe_scalars()
    pipe.build()
    out = pipe.run()
    for tensor in out[0]:
        data = np.array(tensor)
        assert data.dtype == np.int64
        assert data.shape == (), f"Unexpected shape. Expected scalar, got {data.shape}"
def test_conditionals():
    """A TFRecord pipeline must give identical results with conditionals enabled."""
    tfrecord = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train')
    tfrecord_idx = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train.idx')
    @pipeline_def()
    def get_dali_pipeline(tfrec_filenames, tfrec_idx_filenames, shard_id, num_gpus):
        # Fixed seed so both pipeline instances shuffle identically.
        inputs = fn.readers.tfrecord(
            path=tfrec_filenames, index_path=tfrec_idx_filenames, random_shuffle=True,
            shard_id=shard_id, num_shards=num_gpus, initial_fill=10000, seed=42, features={
                'image/encoded': tfrec.FixedLenFeature((), tfrec.string, ""),
                'image/class/label': tfrec.FixedLenFeature([1], tfrec.int64, -1),
                'image/class/text': tfrec.FixedLenFeature([], tfrec.string, ''),
                'image/object/bbox/xmin': tfrec.VarLenFeature(tfrec.float32, 0.0),
                'image/object/bbox/ymin': tfrec.VarLenFeature(tfrec.float32, 0.0),
                'image/object/bbox/xmax': tfrec.VarLenFeature(tfrec.float32, 0.0),
                'image/object/bbox/ymax': tfrec.VarLenFeature(tfrec.float32, 0.0)
            })
        encoded = inputs["image/encoded"]
        images = fn.decoders.image(encoded, device="mixed", output_type=types.RGB)
        images = fn.resize(images, device="gpu", resize_shorter=256)
        labels = inputs["image/class/label"].gpu()
        labels -= 1  # Change to 0-based (don't use background class)
        return images, labels
    # Identical arguments; only enable_conditionals differs between the two.
    pipe_base = get_dali_pipeline(tfrecord, tfrecord_idx, shard_id=0, num_gpus=1, device_id=0,
                                  num_threads=4, batch_size=32)
    pipe_cond = get_dali_pipeline(tfrecord, tfrecord_idx, shard_id=0, num_gpus=1, device_id=0,
                                  num_threads=4, batch_size=32, enable_conditionals=True)
    for pipe in [pipe_base, pipe_cond]:
        pipe.build()
    compare_pipelines(pipe_base, pipe_cond, 32, 5)
|
DALI-main
|
dali/test/python/reader/test_index.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import Pipeline, pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import numpy as np
from numpy.testing import assert_array_equal
import os
import platform
import random
import tempfile
from nose_utils import assert_raises
from nose import SkipTest
from nose2.tools import params, cartesian_params
from test_utils import compare_pipelines, to_array
# Root for GDS test data: prefer /scratch/, falling back to ./scratch/
# (created on demand if missing).
gds_data_root = '/scratch/'
if not os.path.isdir(gds_data_root):
    gds_data_root = os.getcwd() + "/scratch/"
    if not os.path.isdir(gds_data_root):
        os.mkdir(gds_data_root)
assert os.path.isdir(gds_data_root)
# GDS beta is supported only on x86_64 with compute capability >= 6.0
# Cached result of is_gds_supported(); populated on first call.
is_gds_supported_var = None
def is_gds_supported(device_id=0):
    """Return True if GPUDirect Storage tests can run on `device_id`.

    Requires an x86_64 processor and CUDA compute capability >= 6.0 as
    reported by NVML.  The result is cached in a module-level variable.
    NOTE(review): only a missing pynvml module is handled; NVML runtime
    errors (e.g. no driver) would propagate -- confirm that is intended.
    """
    global is_gds_supported_var
    if is_gds_supported_var is not None:
        return is_gds_supported_var
    compute_cap = 0
    try:
        import pynvml
        pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(device_id)
        compute_cap = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
        # (major, minor) -> major.minor as a float, e.g. (8, 6) -> 8.6.
        compute_cap = compute_cap[0] + compute_cap[1] / 10.
    except ModuleNotFoundError:
        print("Python bindings for NVML not found")
    is_gds_supported_var = platform.processor() == "x86_64" and compute_cap >= 6.0
    return is_gds_supported_var
def create_numpy_file(filename, shape, typ, fortran_order):
    """Write a .npy file of the given shape and dtype with random contents."""
    data = (rng.random_sample(shape) * 10.).astype(typ)
    if fortran_order:
        # Column-major layout exercises the reader's Fortran-order handling.
        data = np.asfortranarray(data)
    np.save(filename, data)
def delete_numpy_file(filename):
    """Remove `filename` if it exists; an already-missing file is not an error.

    try/except instead of an isfile() pre-check avoids the race between the
    existence check and the removal.
    """
    try:
        os.remove(filename)
    except FileNotFoundError:
        pass
def NumpyReaderPipeline(path, batch_size, device="cpu", file_list=None, files=None,
                        file_filter="*.npy", num_threads=1, device_id=0,
                        cache_header_information=False, pad_last_batch=False,
                        dont_use_mmap=False, enable_o_direct=False,
                        shard_id=0, num_shards=1):
    """Build and return a pipeline containing a single fn.readers.numpy op.

    One of `file_list`, `files` or `file_filter` selects the input files
    under `path` (the reader's file_root); the remaining options are passed
    through to the reader.
    """
    pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id)
    data = fn.readers.numpy(device=device,
                            file_list=file_list,
                            files=files,
                            file_root=path,
                            file_filter=file_filter,
                            shard_id=shard_id,
                            num_shards=num_shards,
                            cache_header_information=cache_header_information,
                            pad_last_batch=pad_last_batch,
                            dont_use_mmap=dont_use_mmap,
                            use_o_direct=enable_o_direct)
    pipe.set_outputs(data)
    return pipe
# All numpy scalar types covered by the reader tests.  The deprecated
# aliases np.float_ and np.complex_ (removed in NumPy 2.0) are intentionally
# not listed: they were aliases of np.float64 / np.complex128, which are
# already present, so the sets are unchanged on older NumPy as well.
all_numpy_types = set(
    [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, np.int_, np.uint,
     np.longlong, np.ulonglong, np.half, np.float16, np.single, np.double, np.longdouble,
     np.csingle, np.cdouble, np.clongdouble, np.int8, np.int16, np.int32, np.int64, np.uint8,
     np.uint16, np.uint32, np.uint64, np.intp, np.uintp, np.float32, np.float64,
     np.complex64, np.complex128])
# Complex and extended-precision types are rejected by the reader.
unsupported_numpy_types = set(
    [np.csingle, np.cdouble, np.clongdouble, np.complex64, np.complex128, np.longdouble])
# Deterministic RNG so the generated test data is reproducible.
rng = np.random.RandomState(12345)
# Test shapes, for each number of dims
# Eight sample shapes per dimensionality; ndim 0 entries are scalars.
test_shapes = {
    0: [(), (), (), (), (), (), (), ()],
    1: [(10, ), (12, ), (10, ), (20, ), (10, ), (12, ), (13, ), (19, )],
    2: [(10, 10), (12, 10), (10, 12), (20, 15), (10, 11), (12, 11), (13, 11), (19, 10)],
    3: [(6, 2, 5), (5, 6, 2), (3, 3, 3), (10, 1, 8), (8, 8, 3), (2, 2, 3), (8, 4, 3), (1, 10, 1)],
    4: [(2, 6, 2, 5), (5, 1, 6, 2), (3, 2, 3, 3), (1, 10, 1, 8), (2, 8, 2, 3), (2, 3, 2, 3),
        (1, 8, 4, 3), (1, 3, 10, 1)],
}
def _testimpl_types_and_shapes(device, shapes, type, batch_size, num_threads, fortran_order_arg,
                               file_arg_type, cache_header_information, dont_use_mmap=False,
                               enable_o_direct=False):
    """Compare fn.readers.numpy against np.load for one configuration.

    Files of the given `shapes` and dtype `type` are written to a temporary
    directory under gds_data_root, selected via one of the three file
    arguments ('file_list', 'files' or 'file_filter'), and every sample read
    by the pipeline is compared with the array loaded by numpy.
    `fortran_order_arg` may be None, meaning a random choice per file.
    """
    nsamples = len(shapes)
    with tempfile.TemporaryDirectory(prefix=gds_data_root) as test_data_root:
        # setup file
        filenames = ["test_{:02d}.npy".format(i) for i in range(nsamples)]
        full_paths = [os.path.join(test_data_root, fname) for fname in filenames]
        for i in range(nsamples):
            fortran_order = fortran_order_arg
            if fortran_order is None:
                fortran_order = random.choice([False, True])
            create_numpy_file(full_paths[i], shapes[i], type, fortran_order)
        # load manually
        arrays = [np.load(filename) for filename in full_paths]
        # load with numpy reader
        file_list_arg = None
        files_arg = None
        file_filter_arg = None
        if file_arg_type == 'file_list':
            file_list_arg = os.path.join(test_data_root, "input.lst")
            with open(file_list_arg, "w") as f:
                f.writelines("\n".join(filenames))
        elif file_arg_type == 'files':
            files_arg = filenames
        elif file_arg_type == "file_filter":
            file_filter_arg = "*.npy"
        else:
            assert False
        pipe = NumpyReaderPipeline(path=test_data_root,
                                   files=files_arg,
                                   file_list=file_list_arg,
                                   file_filter=file_filter_arg,
                                   cache_header_information=cache_header_information,
                                   device=device,
                                   batch_size=batch_size,
                                   num_threads=num_threads,
                                   device_id=0,
                                   dont_use_mmap=dont_use_mmap,
                                   enable_o_direct=enable_o_direct)
        try:
            pipe.build()
            i = 0
            while i < nsamples:
                pipe_out = pipe.run()
                for s in range(batch_size):
                    if i == nsamples:
                        # The last batch may contain wrapped/padded samples;
                        # ignore anything beyond nsamples.
                        break
                    pipe_arr = to_array(pipe_out[0][s])
                    ref_arr = arrays[i]
                    assert_array_equal(pipe_arr, ref_arr)
                    i += 1
        finally:
            # Release the pipeline (and its open files) before the
            # TemporaryDirectory is removed.
            del pipe
def _get_type_and_shape_params():
    """Yield (device, fortran_order, dtype, shapes, file_arg_type,
    num_threads, batch_size) combinations for test_types_and_shapes.

    A locally seeded generator keeps the parameter sampling reproducible.
    """
    rng = np.random.default_rng(1902)
    for device in ["cpu", "gpu"] if is_gds_supported() else ["cpu"]:
        for fortran_order in [False, True, None]:
            for dtype in all_numpy_types - unsupported_numpy_types:
                for ndim in [0, 1, 2, rng.choice([3, 4])]:
                    # Fortran order is meaningless for 0-D and 1-D arrays.
                    if ndim <= 1 and fortran_order is not False:
                        continue
                    shapes = test_shapes[ndim]
                    file_arg_type = rng.choice(['file_list', 'files', 'file_filter'])
                    num_threads = rng.choice([1, 2, 3, 4, 5, 6, 7, 8])
                    batch_size = rng.choice([1, 3, 4, 8, 16])
                    yield device, fortran_order, dtype, shapes, file_arg_type, \
                        num_threads, batch_size
@params(
    (False, ),
    (True, ),
)
def test_header_parse(use_o_direct):
    """Parse .npy headers of every dimensionality (0..32) and both I/O modes."""
    # Test different ndims to see how well we handle headers of different lengths and padding.
    # The NPY token (header meta-data) + header is padded to the size aligned up to 64 bytes.
    # In particular the `np.full((1,) * 21, 1., dtype=float32)` and
    # `np.full((1,) * 22, 1., dtype=float32)` are the boundary between 128 and 192 bytes header.
    # This make a good case for testing the bounds we use for extracting the header.
    # The 32 is the max dimensionality handled by the numpy
    ndims = list(range(33))
    with tempfile.TemporaryDirectory(prefix=gds_data_root) as test_data_root:
        names = [f"numpy_ndim_{ndim}.npy" for ndim in ndims]
        paths = [os.path.join(test_data_root, name) for name in names]
        assert len(paths) == len(ndims)
        for ndim, path in zip(ndims, paths):
            np.save(path, np.full((1, ) * ndim, 1., dtype=np.float32))
        # O_DIRECT requires disabling mmap as well.
        reader_kwargs = {} if not use_o_direct else {'use_o_direct': True, 'dont_use_mmap': True}
        @pipeline_def(batch_size=1, device_id=0, num_threads=4)
        def pipeline(test_filename):
            arr = fn.readers.numpy(files=[test_filename], **reader_kwargs)
            return arr
        for ndim, path in zip(ndims, paths):
            p = pipeline(test_filename=path)
            p.build()
            out, = p.run()
            shapes = out.shape()
            assert len(shapes) == 1, f"{len(shapes)}"
            shape = shapes[0]
            assert shape == (1,) * ndim, f"{ndim} {shape}"
@params(*list(_get_type_and_shape_params()))
def test_types_and_shapes(device, fortran_order, dtype, shapes, file_arg_type, num_threads,
                          batch_size):
    """Round-trip every supported dtype/shape combination (no header caching)."""
    cache_header_information = False
    _testimpl_types_and_shapes(device, shapes, dtype, batch_size, num_threads, fortran_order,
                               file_arg_type, cache_header_information)
@cartesian_params((0, 1, 2, random.choice([3, 4])),
                  (True, False),
                  (random.choice(['file_list', 'files', 'file_filter']),),
                  (random.choice([1, 2, 3, 4, 5, 6, 7, 8]),),
                  (random.choice([1, 3, 4, 8, 16]),),
                  (random.choice(list(all_numpy_types - unsupported_numpy_types)),))
def test_o_direct(ndim, o_direct, file_arg_type, num_threads, batch_size, type,):
    """Round-trip with dont_use_mmap=True, toggling O_DIRECT on and off."""
    cache_header_information = False
    device = 'cpu'
    fortran_order = False
    shapes = test_shapes[ndim]
    _testimpl_types_and_shapes(device, shapes, type, batch_size, num_threads, fortran_order,
                               file_arg_type, cache_header_information, True, o_direct)
def _get_unsupported_param():
    """Yield (device, dtype) pairs covering every dtype the numpy reader rejects."""
    devices = ["cpu", "gpu"] if is_gds_supported() else ["cpu"]
    for dev in devices:
        for np_type in unsupported_numpy_types:
            yield dev, np_type
@params(*list(_get_unsupported_param()))
def test_unsupported_types(device, dtype):
    """Reading a file of an unsupported numpy dtype must raise a RuntimeError."""
    fortran_order = False
    cache_header_information = False
    file_arg_type = 'files'
    ndim = 1
    shapes = test_shapes[ndim]
    num_threads = 3
    batch_size = 3
    # The reader should reject the dtype while parsing the npy header.
    with assert_raises(RuntimeError, glob="Unknown Numpy type string"):
        _testimpl_types_and_shapes(
            device, shapes, dtype, batch_size, num_threads,
            fortran_order, file_arg_type, cache_header_information)
@params(*(["cpu", "gpu"] if is_gds_supported() else ["cpu"]))
def test_cache_headers(device):
    """Reading must still work when cache_header_information=True (2D float32 files)."""
    dtype = np.float32  # local renamed from `type` to avoid shadowing the builtin
    ndim = 2
    sample_shapes = test_shapes[ndim]
    thread_count = 3
    batch = 3
    use_cache = True
    fortran = False
    arg_type = 'files'
    _testimpl_types_and_shapes(device, sample_shapes, dtype, batch, thread_count,
                               fortran, arg_type, use_cache)
def check_dim_mismatch(device, test_data_root, names):
    """Build a reader over files of different ranks and assert it fails consistently.

    The error is captured instead of using assert_raises so the pipeline can be
    explicitly deleted before the assertions run.
    """
    pipe = Pipeline(2, 2, 0)
    pipe.set_outputs(fn.readers.numpy(device=device, file_root=test_data_root, files=names))
    err = None
    try:
        pipe.build()
        pipe.run()
    except RuntimeError as thrown:
        err = thrown
    finally:
        del pipe
    # asserts should not be in except block to avoid printing nested exception on failure
    assert err, "Exception not thrown"
    assert "Inconsistent data" in str(err), "Unexpected error message: {}".format(err)
@params(*(["cpu", "gpu"] if is_gds_supported() else ["cpu"]))
def test_dim_mismatch(device):
    """A reader mixing 2D and 3D files must report inconsistent data."""
    with tempfile.TemporaryDirectory(prefix=gds_data_root) as data_dir:
        file_names = ["2D.npy", "3D.npy"]
        full_paths = [os.path.join(data_dir, fname) for fname in file_names]
        create_numpy_file(full_paths[0], [3, 4], np.float32, False)
        create_numpy_file(full_paths[1], [2, 3, 4], np.float32, False)
        check_dim_mismatch(device, data_dir, file_names)
def check_type_mismatch(device, test_data_root, names):
    """Build a reader over files of different dtypes and assert it fails consistently.

    Mirrors check_dim_mismatch; additionally checks that both conflicting type
    names appear in the error message.
    """
    err = None
    pipe = Pipeline(2, 2, 0)
    pipe.set_outputs(fn.readers.numpy(device=device, file_root=test_data_root, files=names))
    try:
        pipe.build()
        pipe.run()
    except RuntimeError as thrown:
        err = thrown
    finally:
        del pipe
    # asserts should not be in except block to avoid printing nested exception on failure
    assert err, "Exception not thrown"
    assert "Inconsistent data" in str(err), "Unexpected error message: {}".format(err)
    assert "int32" in str(err) and "float" in str(err), "Unexpected error message: {}".format(err)
@params(*(["cpu", "gpu"] if is_gds_supported() else ["cpu"]))
def test_type_mismatch(device):
    """A reader mixing int32 and float32 files must report inconsistent data."""
    with tempfile.TemporaryDirectory(prefix=gds_data_root) as data_dir:
        file_names = ["int.npy", "float.npy"]
        full_paths = [os.path.join(data_dir, fname) for fname in file_names]
        create_numpy_file(full_paths[0], [1, 2, 5], np.int32, False)
        create_numpy_file(full_paths[1], [2, 3, 4], np.float32, False)
        check_type_mismatch(device, data_dir, file_names)
# Batch size shared by the reader-alias comparison pipelines below.
batch_size_alias_test = 64


@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def numpy_reader_pipe(numpy_op, path, device="cpu", file_filter="*.npy"):
    """Pipeline wrapping ``numpy_op`` (fn.readers.numpy or its legacy alias)."""
    data = numpy_op(device=device,
                    file_root=path,
                    file_filter=file_filter,
                    seed=1234)
    return data
def check_numpy_reader_alias(test_data_root, device):
    """Verify fn.readers.numpy and the legacy fn.numpy_reader produce identical batches."""
    pipes = [
        numpy_reader_pipe(reader_op,
                          path=test_data_root,
                          device=device,
                          file_filter="test_*.npy")
        for reader_op in (fn.readers.numpy, fn.numpy_reader)
    ]
    try:
        compare_pipelines(pipes[0], pipes[1], batch_size_alias_test, 50)
    finally:
        del pipes
@params(*(["cpu", "gpu"] if is_gds_supported() else ["cpu"]))
def test_numpy_reader_alias(device):
    """fn.numpy_reader (deprecated alias) must behave identically to fn.readers.numpy.

    Creates a set of .npy files and compares both reader entry points on them.
    Cleanup: removed unused accumulators (``filenames``, ``arr_np_list``) and the
    redundant ``np.load`` of every file — the comparison is done by the pipelines.
    """
    with tempfile.TemporaryDirectory(prefix=gds_data_root) as test_data_root:
        # create files
        num_samples = 20
        for index in range(num_samples):
            filename = os.path.join(test_data_root, "test_{:02d}.npy".format(index))
            create_numpy_file(filename, (5, 2, 8), np.float32, False)
        check_numpy_reader_alias(test_data_root, device)
@pipeline_def(device_id=0, num_threads=8)
def numpy_reader_roi_pipe(file_root, device="cpu", file_filter='*.npy', roi_start=None,
                          rel_roi_start=None, roi_end=None, rel_roi_end=None, roi_shape=None,
                          rel_roi_shape=None, roi_axes=None, default_axes=None,
                          out_of_bounds_policy=None, fill_value=None):
    """Pipeline returning (reader-side ROI output, reference fn.slice of the full data).

    The two outputs should be equal for every valid ROI parameter combination.
    Fix: ``default_axes`` previously used a mutable default argument (``[]``);
    a ``None`` sentinel preserves the exact original behavior.
    """
    if default_axes is None:
        default_axes = []
    data = fn.readers.numpy(
        device=device,
        file_root=file_root,
        file_filter=file_filter,
        shard_id=0,
        num_shards=1,
        cache_header_information=False)
    roi_data = fn.readers.numpy(
        device=device,
        file_root=file_root,
        file_filter=file_filter,
        roi_start=roi_start,
        rel_roi_start=rel_roi_start,
        roi_end=roi_end,
        rel_roi_end=rel_roi_end,
        roi_shape=roi_shape,
        rel_roi_shape=rel_roi_shape,
        roi_axes=roi_axes,
        out_of_bounds_policy=out_of_bounds_policy,
        fill_value=fill_value,
        shard_id=0,
        num_shards=1,
        cache_header_information=False)
    sliced_data = fn.slice(
        data,
        start=roi_start,
        rel_start=rel_roi_start,
        end=roi_end,
        rel_end=rel_roi_end,
        shape=roi_shape,
        rel_shape=rel_roi_shape,
        axes=roi_axes or default_axes,  # Slice has different default (axis_names="WH")
        out_of_bounds_policy=out_of_bounds_policy,
        fill_values=fill_value)
    return roi_data, sliced_data
def _testimpl_numpy_reader_roi(file_root, batch_size, ndim, dtype, device, fortran_order=False,
                               file_filter="*.npy", roi_start=None, rel_roi_start=None,
                               roi_end=None, rel_roi_end=None, roi_shape=None, rel_roi_shape=None,
                               roi_axes=None, out_of_bounds_policy=None, fill_value=None):
    """Compare reader-side ROI extraction against an equivalent fn.slice of the full data.

    NOTE(review): ``dtype`` and ``fortran_order`` are accepted but unused here —
    presumably kept for signature symmetry with the other _testimpl helpers; confirm.
    """
    default_axes = list(range(ndim))
    pipe = numpy_reader_roi_pipe(
        file_root=file_root, file_filter=file_filter, device=device, roi_start=roi_start,
        rel_roi_start=rel_roi_start, roi_end=roi_end, rel_roi_end=rel_roi_end, roi_shape=roi_shape,
        rel_roi_shape=rel_roi_shape, roi_axes=roi_axes, default_axes=default_axes,
        out_of_bounds_policy=out_of_bounds_policy, fill_value=fill_value, batch_size=batch_size)
    try:
        pipe.build()
        roi_out, sliced_out = pipe.run()
        # Sample-wise comparison: reader ROI must equal the slice reference.
        for i in range(batch_size):
            roi_arr = to_array(roi_out[i])
            sliced_arr = to_array(sliced_out[i])
            assert_array_equal(roi_arr, sliced_arr)
    finally:
        del pipe
def _testimpl_numpy_reader_roi_empty_axes(testcase_name, file_root, batch_size, ndim, dtype, device,
                                          fortran_order, file_filter="*.npy"):
    """Empty ROI lists (roi_start=[], roi_end=[], roi_axes=[]) must behave as a no-op."""
    # testcase name used for visibility in the output logs

    @pipeline_def(batch_size=batch_size, device_id=0, num_threads=8)
    def pipe():
        # Baseline: plain read of the whole file.
        data0 = fn.readers.numpy(
            device=device,
            file_root=file_root,
            file_filter=file_filter,
            shard_id=0,
            num_shards=1,
            cache_header_information=False,
            seed=1234)
        # Same read but with empty ROI arguments — should be identical to data0.
        data1 = fn.readers.numpy(
            device=device,
            file_root=file_root,
            file_filter=file_filter,
            roi_start=[],
            roi_end=[],
            roi_axes=[],
            shard_id=0,
            num_shards=1,
            cache_header_information=False,
            seed=1234)
        return data0, data1

    p = pipe()
    try:
        p.build()
        data0, data1 = p.run()
    finally:
        del p
    # NOTE(review): outputs are compared after the pipeline object is deleted;
    # this relies on the returned batches staying valid afterwards — confirm for GPU data.
    for i in range(batch_size):
        arr = to_array(data0[i])
        roi_arr = to_array(data1[i])
        assert_array_equal(arr, roi_arr)
def _testimpl_numpy_reader_roi_empty_range(testcase_name, file_root, batch_size, ndim, dtype,
                                           device, fortran_order, file_filter="*.npy"):
    """An ROI with start == end on one axis must yield extent 0 on that axis only."""
    # testcase name used for visibility in the output logs

    @pipeline_def(batch_size=batch_size, device_id=0, num_threads=8)
    def pipe():
        # Baseline: plain read of the whole file.
        data0 = fn.readers.numpy(
            device=device,
            file_root=file_root,
            file_filter=file_filter,
            shard_id=0,
            num_shards=1,
            cache_header_information=False,
            seed=1234)
        # Degenerate ROI: [1, 1) on axis 1 — an empty range.
        data1 = fn.readers.numpy(
            device=device,
            file_root=file_root,
            file_filter=file_filter,
            roi_start=[1],
            roi_end=[1],
            roi_axes=[1],
            shard_id=0,
            num_shards=1,
            cache_header_information=False,
            seed=1234)
        return data0, data1

    p = pipe()
    try:
        p.build()
        data0, data1 = p.run()
        for i in range(batch_size):
            arr = to_array(data0[i])
            roi_arr = to_array(data1[i])
            for d in range(len(arr.shape)):
                if d == 1:
                    # The sliced axis collapses to zero extent.
                    assert roi_arr.shape[d] == 0
                else:
                    # All other axes keep the original extent.
                    assert roi_arr.shape[d] == arr.shape[d]
    finally:
        del p
# roi_start, rel_roi_start, roi_end, rel_roi_end, roi_shape,
# rel_roi_shape, roi_axes, out_of_bounds_policy
# Each tuple is one valid ROI configuration exercised by test_numpy_reader_roi.
# NOTE(review): the fn.random.uniform entries create DataNodes at import time,
# later fed as argument inputs to the reader — presumably to test per-sample
# random ROIs; confirm this pattern is intended outside a pipeline scope.
roi_args = [
    ([1, 2], None, None, None, None, None, None, None),
    (None, [0.1, 0.2], None, None, None, None, None, None),
    (None, None, [8, 7], None, None, None, None, None),
    (None, None, None, [0.5, 0.9], None, None, None, None),
    (None, None, None, None, [4, 5], None, None, None),
    (None, None, None, None, None, [0.4, 0.8], None, None),
    (1, None, 9, None, None, None, [0], None),
    (1, None, 9, None, None, None, [1], None),
    ([1, 2], None, [8, 9], None, None, None, [0, 1], None),
    ([1, 2], None, [8, 9], None, None, None, [0, 1], None),
    ([1, 2], None, None, [0.5, 0.4], None, None, [0, 1], None),
    (None, [0.1, 0.2], [8, 9], None, None, None, [0, 1], None),
    ([1, 2], None, [20, 9], None, None, None, [0, 1], "pad"),
    ([-10, 2], None, [8, 9], None, None, None, [0, 1], "pad"),
    ([1, 2], None, [20, 9], None, None, None, [0, 1], "trim_to_shape"),
    ([-10, 2], None, [8, 9], None, None, None, [0, 1], "trim_to_shape"),
    (fn.random.uniform(range=(0, 2), shape=(2, ), dtype=types.INT32), None,
     fn.random.uniform(range=(7, 10), shape=(2, ),
                       dtype=types.INT32), None, None, None, (0, 1), None),
    (fn.random.uniform(range=(0, 2), shape=(1, ), dtype=types.INT32), None,
     fn.random.uniform(range=(7, 10), shape=(1, ),
                       dtype=types.INT32), None, None, None, (1, ), None),
    (None, fn.random.uniform(range=(0.0, 0.2), shape=(1, )), None,
     fn.random.uniform(range=(0.8, 1.0), shape=(1, )), None, None, (1, ), None),
]
def _get_roi_suite_params():
    """Yield (index, *roi_args entry, fortran_order, device, fill_value) test cases."""
    rng = np.random.default_rng(1902)
    devices = ["cpu", "gpu"] if is_gds_supported() else ["cpu"]
    case_idx = 0
    for roi_params in roi_args:
        for fortran_order in (False, True, None):
            for device in devices:
                fill_value = rng.choice([None, 10.0])
                yield (case_idx,) + roi_params + (fortran_order, device, fill_value)
                case_idx += 1
@params(*list(_get_roi_suite_params()))
def test_numpy_reader_roi(i, roi_start, rel_roi_start, roi_end, rel_roi_end, roi_shape,
                          rel_roi_shape, roi_axes, out_of_bounds_policy, fortran_order,
                          device, fill_value):
    """Reader-side ROI must match fn.slice for every ROI parameter combination."""
    # setup file
    shapes = [(10, 10), (12, 10), (10, 12), (20, 15), (10, 11), (12, 11), (13, 11), (19, 10)]
    ndim = 2
    dtype = np.uint8
    batch_size = 8
    file_filter = "*.npy"
    rng = np.random.default_rng(4242 + i)  # per-case seed keeps file layouts reproducible
    with tempfile.TemporaryDirectory(prefix=gds_data_root) as test_data_root:
        index = 0
        for sh in shapes:
            filename = os.path.join(test_data_root, "test_{:02d}.npy".format(index))
            index += 1
            # fortran_order=None means: pick the memory layout at random per file.
            if fortran_order is not None:
                actual_fortran_order = fortran_order
            else:
                actual_fortran_order = rng.choice([False, True])
            create_numpy_file(filename, sh, dtype, actual_fortran_order)
        _testimpl_numpy_reader_roi(
            test_data_root, batch_size, ndim, dtype, device,
            fortran_order, file_filter, roi_start, rel_roi_start,
            roi_end, rel_roi_end, roi_shape, rel_roi_shape, roi_axes,
            out_of_bounds_policy, fill_value)
def _get_roi_empty_axes_params():
    """Yield (index, fortran_order, device, axes_or_range) combinations."""
    case_idx = 0
    for fortran_order in (False, True, None):
        for device in (["cpu", "gpu"] if is_gds_supported() else ["cpu"]):
            for mode in ("axes", "range"):
                yield case_idx, fortran_order, device, mode
                case_idx += 1
@params(*list(_get_roi_empty_axes_params()))
def test_numpy_reader_roi_empty_axes(i, fortran_order, device, axes_or_range):
    """Dispatch to the empty-axes or empty-range ROI edge-case check."""
    # setup file
    shapes = [(10, 10), (12, 10), (10, 12), (20, 15), (10, 11), (12, 11), (13, 11), (19, 10)]
    ndim = 2
    dtype = np.uint8
    batch_size = 8
    file_filter = "*.npy"
    rng = np.random.default_rng(4242 + i)  # per-case seed keeps file layouts reproducible
    with tempfile.TemporaryDirectory(prefix=gds_data_root) as test_data_root:
        index = 0
        for sh in shapes:
            filename = os.path.join(test_data_root, "test_{:02d}.npy".format(index))
            index += 1
            # fortran_order=None means: pick the memory layout at random per file.
            if fortran_order is not None:
                actual_fortran_order = fortran_order
            else:
                actual_fortran_order = rng.choice([False, True])
            create_numpy_file(filename, sh, dtype, actual_fortran_order)
        if axes_or_range == "axes":
            _testimpl_numpy_reader_roi_empty_axes(
                "empty axes", test_data_root, batch_size, ndim,
                dtype, device, fortran_order, file_filter)
        else:
            assert axes_or_range == "range"
            _testimpl_numpy_reader_roi_empty_range(
                "empty range", test_data_root, batch_size, ndim,
                dtype, device, fortran_order, file_filter)
def _testimpl_numpy_reader_roi_error(file_root, batch_size, ndim, dtype, device,
                                     fortran_order=False, file_filter="*.npy",
                                     roi_start=None, rel_roi_start=None,
                                     roi_end=None, rel_roi_end=None,
                                     roi_shape=None, rel_roi_shape=None,
                                     roi_axes=None,
                                     out_of_bounds_policy=None, fill_value=None):
    """Build/run a reader with an invalid ROI configuration and assert it raises.

    NOTE(review): ``ndim``, ``dtype`` and ``fortran_order`` are unused here —
    presumably kept for signature symmetry with the other _testimpl helpers; confirm.
    """

    @pipeline_def(batch_size=batch_size, device_id=0, num_threads=8)
    def pipe():
        data = fn.readers.numpy(
            device=device,
            file_root=file_root,
            file_filter=file_filter,
            roi_start=roi_start,
            rel_roi_start=rel_roi_start,
            roi_end=roi_end,
            rel_roi_end=rel_roi_end,
            roi_shape=roi_shape,
            rel_roi_shape=rel_roi_shape,
            roi_axes=roi_axes,
            out_of_bounds_policy=out_of_bounds_policy,
            fill_value=fill_value,
            shard_id=0,
            num_shards=1,
            cache_header_information=False)
        return data

    p = pipe()
    err = None
    try:
        p.build()
        p.run()
    except RuntimeError as thrown:
        err = thrown
    # asserts should not be in except block to avoid printing nested exception on failure
    assert err, "Exception not thrown"
def _get_roi_error_params():
    """Yield (device, *invalid roi_args entry, fill_value) combinations.

    Fix: the original referenced ``rng`` without defining it in this scope
    (the sibling _get_roi_suite_params creates its own); a local deterministic
    generator makes the parameter set self-contained and reproducible.
    """
    # roi_start, rel_roi_start, roi_end, rel_roi_end, roi_shape, rel_roi_shape,
    # roi_axes, out_of_bounds_policy
    roi_args = [
        # Both roi_start and rel_roi_start
        ([1, 2], [0.1, 0.2], None, None, None, None, None, None),
        (None, None, [8, 7], [0.4, 0.5], None, None, None, None),  # Both roi_end and rel_roi_end
        (None, None, [8, 7], None, [8, 7], None, None, None),  # Both roi_end and roi_shape
        (None, None, [8, 7], None, None, [0.4, 0.5], None, None),  # Both roi_end and rel_roi_shape
        (None, None, None, [0.5, 0.4], [8, 7], None, None, None),  # Both rel_roi_end and roi_shape
        ([-1, 2], None, None, None, None, None, None, None),  # Out of bounds anchor
        (None, None, [100, 8], None, None, None, None, None),  # Out of bounds end
        (None, None, None, None, [100, 8], None, None, None),  # Out of bounds shape
    ]
    rng = np.random.default_rng(1407)
    for device in ["cpu", "gpu"] if is_gds_supported() else ["cpu"]:
        for roi_params in roi_args:
            fill_value = rng.choice([None, 10.0])
            yield (device,) + roi_params + (fill_value,)
@params(*list(_get_roi_error_params()))
def test_numpy_reader_roi_error(device, roi_start, rel_roi_start, roi_end, rel_roi_end, roi_shape,
                                rel_roi_shape, roi_axes, out_of_bounds_policy, fill_value):
    """Invalid or conflicting ROI arguments must make the reader raise."""
    # setup file
    shapes = [(10, 10), (12, 10), (10, 12), (20, 15), (10, 11), (12, 11), (13, 11), (19, 10)]
    ndim = 2
    dtype = np.uint8
    batch_size = 8
    file_filter = "*.npy"
    fortran_order = False
    with tempfile.TemporaryDirectory(prefix=gds_data_root) as test_data_root:
        index = 0
        for sh in shapes:
            filename = os.path.join(test_data_root, "test_{:02d}.npy".format(index))
            index += 1
            create_numpy_file(filename, sh, dtype, fortran_order=fortran_order)
        _testimpl_numpy_reader_roi_error(test_data_root, batch_size, ndim, dtype, device,
                                         fortran_order, file_filter, roi_start, rel_roi_start,
                                         roi_end, rel_roi_end, roi_shape, rel_roi_shape, roi_axes,
                                         out_of_bounds_policy, fill_value)
@cartesian_params(('cpu', 'gpu'),
                  ((1, 2, 1), (3, 1, 2)),
                  (True, False),
                  (True, False))
def test_pad_last_sample(device, batch_description, dont_use_mmap, use_o_direct):
    """pad_last_batch=True must repeat the last sample (data and source_info) to fill a batch."""
    if not is_gds_supported() and device == 'gpu':
        raise SkipTest("GDS is not supported in this platform")
    if not dont_use_mmap and use_o_direct:
        raise SkipTest("Cannot use O_DIRECT with mmap")
    with tempfile.TemporaryDirectory(prefix=gds_data_root) as test_data_root:
        # create files
        num_samples, batch_size, num_shards = batch_description
        filenames = []
        ref_filenames = []
        arr_np_list = []
        last_file_name = None
        for index in range(0, num_samples):
            filename = os.path.join(test_data_root, "test_{:02d}.npy".format(index))
            last_file_name = filename
            filenames.append(filename)
            create_numpy_file(filename, (5, 2, 8), np.float32, False)
            arr_np_list.append(np.load(filename))
            ref_filenames.append(filename)
        # Reference batch: the last sample is repeated until batch_size is reached.
        while len(arr_np_list) < batch_size:
            arr_np_list.append(np.load(last_file_name))
            ref_filenames.append(last_file_name)
        pipe = NumpyReaderPipeline(path=test_data_root,
                                   files=filenames,
                                   file_list=None,
                                   file_filter=None,
                                   device=device,
                                   batch_size=batch_size,
                                   num_threads=4,
                                   device_id=0,
                                   pad_last_batch=True,
                                   num_shards=num_shards,
                                   dont_use_mmap=dont_use_mmap,
                                   enable_o_direct=use_o_direct)
        pipe.build()
        try:
            # Two epochs: padding must be consistent across epochs.
            for _ in range(2):
                pipe_out = pipe.run()
                for i in range(batch_size):
                    out_arr = to_array(pipe_out[0][i])
                    out_prop = pipe_out[0][i].source_info()
                    ref_arr = arr_np_list[i]
                    # source_info of padded samples must point at the repeated file.
                    assert out_prop == ref_filenames[i]
                    assert_array_equal(out_arr, ref_arr)
        finally:
            del pipe
@cartesian_params(('global', 'local', 'none'),
                  (True, False))
def test_shuffling(shuffling, pad_last_batch):
    """CPU and GPU readers with identical shuffling settings must yield identical batches.

    Bug fix: the 'local' branch previously tested ``shuffle_after_epoch == 'local'``
    (a bool compared to a string — always False), so ``random_shuffle`` and
    ``stick_to_shard`` were never enabled; it must test ``shuffling == 'local'``.
    """
    if not is_gds_supported():
        raise SkipTest("GDS is not supported in this platform")
    with tempfile.TemporaryDirectory(prefix=gds_data_root) as test_data_root:
        # create files
        num_samples = 10
        batch_size = 3
        filenames = []
        for index in range(0, num_samples):
            filename = os.path.join(test_data_root, "test_{:02d}.npy".format(index))
            filenames.append(filename)
            create_numpy_file(filename, (3, 2, 1), np.int8, False)
        random_shuffle = False
        shuffle_after_epoch = False
        stick_to_shard = False
        if shuffling == 'global':
            shuffle_after_epoch = True
        elif shuffling == 'local':
            random_shuffle = True
            stick_to_shard = True
        pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
        with pipe:
            data_cpu = fn.readers.numpy(device='cpu',
                                        files=filenames,
                                        file_root=test_data_root,
                                        shard_id=0,
                                        num_shards=2,
                                        pad_last_batch=pad_last_batch,
                                        random_shuffle=random_shuffle,
                                        shuffle_after_epoch=shuffle_after_epoch,
                                        stick_to_shard=stick_to_shard)
            data_gpu = fn.readers.numpy(device='gpu',
                                        files=filenames,
                                        file_root=test_data_root,
                                        shard_id=0,
                                        num_shards=2,
                                        pad_last_batch=pad_last_batch,
                                        random_shuffle=random_shuffle,
                                        shuffle_after_epoch=shuffle_after_epoch,
                                        stick_to_shard=stick_to_shard)
            pipe.set_outputs(data_cpu, data_gpu)
        pipe.build()
        # Two passes over the (sharded) data set; both backends must agree batch-by-batch.
        for _ in range(num_samples // batch_size * 2):
            (cpu_arr, gpu_arr) = pipe.run()
            assert_array_equal(to_array(cpu_arr), to_array(gpu_arr))
|
DALI-main
|
dali/test/python/reader/test_numpy.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.types as types
from test_utils import compare_pipelines, check_batch
from nose_utils import assert_raises
from nose2.tools import params
import numpy as np
def test_not():
    """`not` under enable_conditionals must match `== 0` on a scalar boolean input."""
    batch = 10
    n_iters = 5
    common = {"batch_size": batch, "num_threads": 4, "device_id": 0, "seed": 42}

    @pipeline_def(**common)
    def regular_pipe():
        flag = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=42)
        return flag == 0

    @pipeline_def(enable_conditionals=True, **common)
    def not_pipe():
        flag = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=42)
        return not flag

    pipes = [regular_pipe(), not_pipe()]
    for p in pipes:
        p.build()
    compare_pipelines(*pipes, batch, n_iters)
def test_and():
    """`and` under enable_conditionals must match bitwise `&` on scalar boolean inputs.

    Also mixes DataNode operands with constant True/False on either side.
    """
    bs = 10
    iters = 5
    kwargs = {"batch_size": bs, "num_threads": 4, "device_id": 0, "seed": 42}

    @pipeline_def(**kwargs)
    def regular_pipe():
        boolean_input_0 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=6)
        boolean_input_1 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=9)
        const_F = types.Constant(np.array(False), device="cpu")
        const_T = types.Constant(np.array(True), device="cpu")
        return (boolean_input_0 & boolean_input_1, boolean_input_0 & const_F,
                const_T & boolean_input_1)

    @pipeline_def(enable_conditionals=True, **kwargs)
    def and_pipe():
        boolean_input_0 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=6)
        boolean_input_1 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=9)
        const_F = types.Constant(np.array(False), device="cpu")
        const_T = types.Constant(np.array(True), device="cpu")
        return (boolean_input_0 and boolean_input_1, boolean_input_0 and const_F, const_T
                and boolean_input_1)

    pipes = [regular_pipe(), and_pipe()]
    for pipe in pipes:
        pipe.build()
    compare_pipelines(*pipes, bs, iters)
def test_or():
    """`or` under enable_conditionals must match bitwise `|` on scalar boolean inputs.

    Also mixes DataNode operands with constant True/False on either side.
    """
    bs = 10
    iters = 5
    kwargs = {"batch_size": bs, "num_threads": 4, "device_id": 0, "seed": 42}

    @pipeline_def(**kwargs)
    def regular_pipe():
        boolean_input_0 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=6)
        boolean_input_1 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=9)
        const_F = types.Constant(np.array(False), device="cpu")
        const_T = types.Constant(np.array(True), device="cpu")
        return (boolean_input_0 | boolean_input_1, boolean_input_0 | const_F,
                const_T | boolean_input_1)

    @pipeline_def(enable_conditionals=True, **kwargs)
    def or_pipe():
        boolean_input_0 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=6)
        boolean_input_1 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=9)
        const_F = types.Constant(np.array(False), device="cpu")
        const_T = types.Constant(np.array(True), device="cpu")
        return (boolean_input_0 or boolean_input_1, boolean_input_0 or const_F, const_T
                or boolean_input_1)

    pipes = [regular_pipe(), or_pipe()]
    for pipe in pipes:
        pipe.build()
    compare_pipelines(*pipes, bs, iters)
def test_complex_expression():
    """A mixed `or`/`and`/`not` expression must match the equivalent bitwise formula.

    Python precedence (`and` binds tighter than `or`) mirrors the parenthesized
    bitwise version, and `not x` corresponds to `x == 0`.
    """
    bs = 10
    iters = 5
    kwargs = {"batch_size": bs, "num_threads": 4, "device_id": 0, "seed": 42}

    @pipeline_def(**kwargs)
    def regular_pipe():
        boolean_input_0 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=6)
        boolean_input_1 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=9)
        boolean_input_2 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=12)
        boolean_input_3 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=15)
        return (boolean_input_0 | (boolean_input_1 & boolean_input_2)) | (boolean_input_3 == 0)

    @pipeline_def(enable_conditionals=True, **kwargs)
    def expr_pipe():
        boolean_input_0 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=6)
        boolean_input_1 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=9)
        boolean_input_2 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=12)
        boolean_input_3 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=15)
        return boolean_input_0 or boolean_input_1 and boolean_input_2 or not boolean_input_3

    pipes = [regular_pipe(), expr_pipe()]
    for pipe in pipes:
        pipe.build()
    compare_pipelines(*pipes, bs, iters)
def test_lazy_eval():
    """`and` must short-circuit like the explicit if/else it is lowered to:
    the right operand is only evaluated when the left operand is True."""
    bs = 10
    iters = 5
    kwargs = {"batch_size": bs, "num_threads": 4, "device_id": 0, "seed": 42}

    @pipeline_def(enable_conditionals=True, **kwargs)
    def if_pipe():
        boolean_input_0 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=6)
        boolean_input_1 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=9)
        if boolean_input_0:
            val = boolean_input_1 == False  # noqa: E712
        else:
            val = boolean_input_0
        return val

    @pipeline_def(enable_conditionals=True, **kwargs)
    def expr_pipe():
        boolean_input_0 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=6)
        boolean_input_1 = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=9)
        val = boolean_input_0 and boolean_input_1 == False  # noqa: E712
        return val

    pipes = [if_pipe(), expr_pipe()]
    for pipe in pipes:
        pipe.build()
    compare_pipelines(*pipes, bs, iters)
def test_lazy_eval_with_oob():
    """Short-circuiting must skip the right operand entirely: the RHS contains an
    out-of-bounds access that would fail if it were ever evaluated."""
    bs = 10
    iters = 5
    kwargs = {"batch_size": bs, "num_threads": 4, "device_id": 0, "seed": 42}

    @pipeline_def(enable_conditionals=True, **kwargs)
    def base_pipe():
        return types.Constant(np.bool_(True))

    @pipeline_def(enable_conditionals=True, **kwargs)
    def expr_pipe():
        boolean_tensor_input = types.Constant(np.bool_([True, True, False]), device="cpu")
        index_input_1 = types.Constant(np.int32(1), device="cpu")
        index_input_42 = types.Constant(np.int32(42), device="cpu")
        # do an oob access in the right subexpression that won't be evaluated.
        val = boolean_tensor_input[index_input_1] or boolean_tensor_input[index_input_42]
        return val

    pipes = [base_pipe(), expr_pipe()]
    for pipe in pipes:
        pipe.build()
    compare_pipelines(*pipes, bs, iters)
def logical_true_false_random():
    """Return an External Source producing a batch of [True, False, <random booleans>].

    Guarantees at least one True and one False sample per batch, so no side of a
    tested logical expression is ever fully short-circuited away.
    """
    rng = np.random.default_rng(seed=101)

    def sample_gen(sample_info):
        idx = sample_info.idx_in_batch
        if idx == 0:
            return np.array(True)
        if idx == 1:
            return np.array(False)
        return rng.choice([np.array(True), np.array(False)])

    return fn.external_source(source=sample_gen, batch=False)
# Logical expressions under test; each takes a scalar boolean DataNode `x` and
# combines it with a fresh guaranteed-True/False external-source input.
logical_expressions = [
    lambda x: not x,
    lambda x: x and logical_true_false_random(),
    lambda x: logical_true_false_random() and x,
    lambda x: x or logical_true_false_random(),
    lambda x: logical_true_false_random() or x,
]
@params(*logical_expressions)
def test_error_input(expression):
    """Logical expressions must reject GPU-placed and non-scalar inputs with clear errors."""
    kwargs = {
        "enable_conditionals": True,
        "batch_size": 10,
        "num_threads": 4,
        "device_id": 0,
    }

    @pipeline_def(**kwargs)
    def gpu_input():
        input = fn.random.coin_flip(dtype=types.DALIDataType.BOOL)
        return expression(input.gpu())

    # We can make a valid graph with `not` op directly, the rest (`and`, `or`) is basically lowered
    # to `if` statements and thus checked by graph via argument input placement validation.
    with assert_raises(
            RuntimeError, regex=("Logical expression `.*` is restricted to scalar \\(0-d tensors\\)"
                                 " inputs of `bool` type, that are placed on CPU."
                                 " Got a GPU input .*in logical expression.*|"
                                 "Named arguments inputs to operators must be CPU data nodes."
                                 " However, a GPU data node was provided")):
        pipe = gpu_input()
        pipe.build()
        pipe.run()

    @pipeline_def(**kwargs)
    def non_scalar_input():
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL)
        stacked = fn.stack(pred, pred)  # makes the predicate 1-d, which is disallowed
        return expression(stacked)

    with assert_raises(
            RuntimeError, glob=("Logical expression `*` is restricted to scalar (0-d tensors)"
                                " inputs*, that are placed on CPU. Got a 1-d input"
                                " *in logical expression.")):
        pipe = non_scalar_input()
        pipe.build()
        pipe.run()
# `and`/`or` require strictly boolean inputs (unlike `not`, which accepts any
# dtype that can be coerced to truth value) — tested separately below.
boolean_restricted_logical_expressions = [
    lambda x: x and logical_true_false_random(),
    lambda x: logical_true_false_random() and x,
    lambda x: x or logical_true_false_random(),
    lambda x: logical_true_false_random() or x,
]
@params(*boolean_restricted_logical_expressions)
def test_non_boolean_input_error(expression):
    """`and`/`or` must reject non-bool (here int32) scalar inputs with a clear error."""
    kwargs = {
        "enable_conditionals": True,
        "batch_size": 10,
        "num_threads": 4,
        "device_id": 0,
    }

    @pipeline_def(**kwargs)
    def non_bool_input():
        input = fn.random.coin_flip(dtype=types.DALIDataType.INT32)
        return expression(input)

    with assert_raises(
            RuntimeError, glob=("Logical expression `*` is restricted to scalar (0-d tensors)"
                                " inputs of `bool` type, that are placed on CPU. Got an input"
                                " of type `int32` *in logical expression.")):
        pipe = non_bool_input()
        pipe.build()
        pipe.run()
# All numeric dtypes whose truth value `not` should be able to evaluate
# (0 -> True after negation, non-zero -> False).
boolable_types = [
    bool, np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16, np.int32, np.int64,
    np.float16, np.float32, np.float64
]
@params(*boolable_types)
def test_not_any_type(input_type):
    """`not` must coerce any numeric dtype by its truth value (42 -> False, 0 -> True)."""
    batch_size = 10
    kwargs = {
        "enable_conditionals": True,
        "batch_size": batch_size,
        "num_threads": 4,
        "device_id": 0,
    }

    def get_truthy_falsy(sample_info):
        # First half of the batch is truthy (42), second half falsy (0).
        if sample_info.idx_in_batch < batch_size / 2:
            return np.array(42, dtype=input_type)
        else:
            return np.array(0, dtype=input_type)

    @pipeline_def(**kwargs)
    def non_bool_input():
        input = fn.external_source(source=get_truthy_falsy, batch=False)
        return not input

    pipe = non_bool_input()
    pipe.build()
    batch, = pipe.run()
    target = [False if i < batch_size / 2 else True for i in range(batch_size)]
    check_batch(batch, target)
|
DALI-main
|
dali/test/python/conditionals/test_logical_expressions.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import fn, pipeline_def, types
import numpy as np
from test_utils import check_batch
from nose_utils import assert_raises
def test_select_impls():
    """Conditional branches may return tuples of DataNodes (multi-output ops)."""
    # Test recursive select that returns tuple of outputs (due to the ops having 2 outputs).
    # Without supporting nested structures it encountered a ((DataNode, DataNode),) branch output
    # and crashed.
    def _select_fwd(op_range_lo, op_range_hi, ops, selected_op_idx, op_args, op_kwargs):
        # Binary-search dispatch that forwards the branch result as-is (tuple kept intact).
        assert op_range_lo <= op_range_hi
        if op_range_lo == op_range_hi:
            return ops[op_range_lo](*op_args, **op_kwargs)
        mid = (op_range_lo + op_range_hi) // 2
        if selected_op_idx <= mid:
            ret = _select_fwd(op_range_lo, mid, ops, selected_op_idx, op_args, op_kwargs)
        else:
            ret = _select_fwd(mid + 1, op_range_hi, ops, selected_op_idx, op_args, op_kwargs)
        return ret

    def _select_unpack(op_range_lo, op_range_hi, ops, selected_op_idx, op_args, op_kwargs):
        # Same dispatch, but unpacks the two outputs in every branch before returning.
        assert op_range_lo <= op_range_hi
        if op_range_lo == op_range_hi:
            return ops[op_range_lo](*op_args, **op_kwargs)
        mid = (op_range_lo + op_range_hi) // 2
        if selected_op_idx <= mid:
            a, b = _select_unpack(op_range_lo, mid, ops, selected_op_idx, op_args, op_kwargs)
        else:
            a, b = _select_unpack(mid + 1, op_range_hi, ops, selected_op_idx, op_args, op_kwargs)
        return a, b

    def select(ops, selected_op_idx, *op_args, unpacking_select=False, **op_kwargs):
        if unpacking_select:
            return _select_unpack(0, len(ops) - 1, ops, selected_op_idx, op_args, op_kwargs)
        else:
            return _select_fwd(0, len(ops) - 1, ops, selected_op_idx, op_args, op_kwargs)

    def rotate(image, label):
        image = fn.rotate(image, angle=42)
        return image, label

    def color(image, label):
        image = fn.color_twist(image, saturation=0)
        return image, label

    @pipeline_def(enable_conditionals=True, num_threads=4, batch_size=8, device_id=0)
    def pipeline(unpacking_select):
        image = types.Constant(np.full((200, 300, 3), 42, dtype=np.uint8), device="cpu")
        label = types.Constant(np.array(1), device="cpu")
        ops = [rotate, color]
        op_idx = fn.random.uniform(values=list(range(len(ops))))
        image, label = select(ops, op_idx, image=image, label=label,
                              unpacking_select=unpacking_select)
        return image, label

    # Both variants must build and run without crashing on nested branch outputs.
    pipe_unpacking = pipeline(unpacking_select=True)
    pipe_unpacking.build()
    pipe_unpacking.run()
    pipe_forwarding = pipeline(unpacking_select=False)
    pipe_forwarding.build()
    pipe_forwarding.run()
def test_dicts():
    """Dicts assigned in `if`/`else` branches must be merged per key, for both
    constant values and DataNode values."""

    @pipeline_def(enable_conditionals=True, num_threads=4, batch_size=8, device_id=0)
    def pipeline():
        pred = fn.external_source(source=lambda x: np.array(x.idx_in_batch % 2), batch=False)
        if pred:
            out = {'out': np.array(2)}
        else:
            out = {'out': np.array(1)}
        return out['out']

    pipe = pipeline()
    pipe.build()
    out, = pipe.run()
    # Odd sample indices take the `if` branch (2), even ones the `else` branch (1).
    check_batch(out, [i % 2 + 1 for i in range(8)])

    @pipeline_def(enable_conditionals=True, num_threads=4, batch_size=8, device_id=0)
    def pipeline_op():
        pred = fn.external_source(source=lambda x: np.array(x.idx_in_batch % 2), batch=False)
        data = types.Constant(np.array(42), device="cpu")
        if pred:
            out = {'out': data - 1}
        else:
            out = {'out': data + 1}
        return out['out']

    pipe_op = pipeline_op()
    pipe_op.build()
    out, = pipe_op.run()
    check_batch(out, [41 if i % 2 else 43 for i in range(8)])
def test_tuples():
    """Tuples assigned in `if`/`else` branches must be merged element-wise and
    remain unpackable after the conditional."""

    @pipeline_def(enable_conditionals=True, num_threads=4, batch_size=8, device_id=0)
    def pipeline():
        pred = fn.external_source(source=lambda x: np.array(x.idx_in_batch % 2), batch=False)
        data = types.Constant(np.array(42), device="cpu")
        if pred:
            out = (data, data + 10, data + 20)
        else:
            out = (np.array(-10), data, data * 2)
        a, b, c = out
        return a, b, c

    pipe = pipeline()
    pipe.build()
    a, b, c, = pipe.run()
    # Odd sample indices take the `if` branch, even ones the `else` branch.
    check_batch(a, [42 if i % 2 else -10 for i in range(8)])
    check_batch(b, [52 if i % 2 else 42 for i in range(8)])
    check_batch(c, [62 if i % 2 else 84 for i in range(8)])
def test_nesting_error():
    """Divergent nesting structure between branches must raise a descriptive error."""

    @pipeline_def(enable_conditionals=True, num_threads=4, batch_size=8, device_id=0)
    def pipeline():
        pred = fn.external_source(source=lambda x: np.array(x.idx_in_batch % 2), batch=False)
        if pred:
            # Extra key in this branch makes the structures mismatched.
            out = {'out': np.array(2), 'mismatched': np.array(9999)}
        else:
            out = {'out': np.array(1)}
        return out

    with assert_raises(
            ValueError,
            glob=("*Divergent data found in different branches of `if/else` control"
                  " flow statement. Variables in all code paths are merged into common"
                  " output batches. The values assigned to a given variable need to"
                  " have the same nesting structure in every code path"
                  " (both `if` branches).*"
                  "*The two structures don't have the same nested structure*"
                  "*The two dictionaries don't have the same set of keys."
                  " First structure has keys type=list str=*'out', 'mismatched'*,"
                  " while second structure has keys type=list str=*'out'*")):
        _ = pipeline()
# ============================================================================
# File boundary (concatenation artifact).
# Following content originates from:
#   DALI-main: dali/test/python/conditionals/test_nests.py
# ============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import pipeline_def, experimental
import nvidia.dali.fn as fn
import nvidia.dali.types as types
from nvidia.dali.types import SampleInfo
from nvidia.dali import _conditionals
from nvidia.dali.data_node import DataNode
import numpy as np
import os
from test_utils import check_batch, compare_pipelines
from nose_utils import assert_raises
from test_utils import get_dali_extra_path
from nose2.tools import params
import itertools
def test_condition_stack():
    """White-box test of `_conditionals._ConditionStack` scope tracking and splitting."""
    test_stack = _conditionals._ConditionStack()
    pred_node = DataNode("PredOp")
    pred_nested = DataNode("PredOp2")
    some_op = DataNode("SomeOp")
    some_nested_op = DataNode("SomeOp2")
    # Model:
    # if pred_node:
    #     some_op()
    #     if pred_nested:
    #         some_nested_op()
    test_stack.register_data_nodes(pred_node)
    test_stack.register_data_nodes(pred_nested)
    # Both visible in global scope (level 0).
    assert test_stack._find_closest(pred_node) == 0
    assert test_stack._find_closest(pred_nested) == 0
    # First predicate, no splitting required, as this is the first nesting level.
    first_level = test_stack.push_predicate(pred_node)
    assert _conditionals._data_node_repr(pred_node) == _conditionals._data_node_repr(first_level)
    test_stack.track_true_branch()
    test_stack.register_data_nodes(some_op)
    assert test_stack._find_closest(some_op) == 1
    assert test_stack._find_closest(pred_nested) == 0
    assert test_stack.stack_depth() == 2
    true_split = test_stack._realize_split(pred_nested, 0)
    second_level = test_stack.push_predicate(pred_nested)
    # Second predicate requires splitting into the nested scope.
    assert _conditionals._data_node_repr(true_split) == _conditionals._data_node_repr(second_level)
    test_stack.track_true_branch()
    test_stack.register_data_nodes(some_nested_op)
    assert test_stack._find_closest(some_nested_op) == 2
    # It's already on this level, so preprocessing is a no-op.
    assert len(test_stack.top().produced) == 1
    preprocessed = test_stack.preprocess_input(some_nested_op)
    assert (_conditionals._data_node_repr(some_nested_op)
            == _conditionals._data_node_repr(preprocessed))
    assert len(test_stack.top().produced) == 1
    # This one is not on this level - preprocessing creates a split node here.
    assert len(test_stack.top().produced) == 1
    preprocessed = test_stack.preprocess_input(some_op)
    assert _conditionals._data_node_repr(some_op) != _conditionals._data_node_repr(some_nested_op)
    assert len(test_stack.top().produced) == 2
    test_stack.pop()
    test_stack.pop()
    assert len(test_stack.top().produced) == 2
# Shared random generator for the stochastic sample generators below.
rng = np.random.default_rng()

# Integer predicate generators: each maps a SampleInfo-like object (attribute access
# only) to a scalar np.int32 used as a truthy/falsy condition under test.
num_gens = [
    lambda x: np.int32(x.idx_in_batch - 3),
    lambda x: np.int32(-1 if x.idx_in_batch % 2 == 0 else 1),
    lambda x: np.int32((x.idx_in_batch % 3 == 0) - 1),
    lambda _: np.int32(1),
    lambda _: np.int32(0),
    lambda _: np.int32(-1),
    lambda _: rng.choice([np.int32(-2), np.int32(0), np.int32(2)]),
]

# Boolean predicate generators - 0-d bool arrays, some depending on batch position
# and iteration number, one constant and one random.
pred_gens = [
    lambda x: np.array(x.idx_in_batch < 3),
    lambda x: np.array(x.idx_in_batch % 2 == 0),
    lambda x: np.array(x.idx_in_batch % 3 == 0),
    lambda x: np.array((x.idx_in_batch + (x.iteration % 2)) % 2 == 0),
    lambda _: np.array(False),
    lambda _: rng.choice([np.array(True), np.array(False)]),
]

# Data-input generators: constant zero or the sample's index in the epoch.
input_gens = [lambda x: np.array(0), lambda x: np.array(x.idx_in_epoch)]
def generic_execute(function, input_gen_list, optional_params=None):
    """Given a Python `function` (taking some positional arguments) and a list of sample
    generators, execute the function twice on batches of data generated by the generators and
    compare the results to test the conditional execution.

    The function is executed both as a:
      * DALI Pipeline with conditional execution enabled. External source nodes are passed
        as positional parameters and fed with the generated batches.
      * Regular function, where we pass the batches sample-by-sample to build output batches.

    Parameters
    ----------
    function : callable
        Function used for testing.
    input_gen_list : list of sample generators
        Possibly stateful generators, one per positional input.
    optional_params : list of dictionaries, optional
        Optional kwargs for the external source associated with the given input position,
        by default None.
    """
    if optional_params is None:
        optional_params = [{} for _ in input_gen_list]
    assert len(input_gen_list) == len(optional_params), ("Optional param should be provided for"
                                                         " every external source node.")
    bs = 10
    iters = 5
    kwargs = {
        "batch_size": bs,
        "num_threads": 4,
        "device_id": 0,
        "prefetch_queue_depth": 1  # so that it's easier to use external source
    }
    # Prepare external source nodes with placeholder names.
    es_inputs = [
        fn.external_source(name=f"input_{i}", **params)
        for i, params in enumerate(optional_params)
    ]
    pipeline_definition = pipeline_def(enable_conditionals=True)(function)

    def gen_batch(generator, bs, iter):
        # Build one batch worth of samples for the given iteration.
        return [generator(SampleInfo(bs * iter + i, i, iter, 0)) for i in range(bs)]

    pipe = pipeline_definition(*es_inputs, **kwargs)
    pipe.build()
    for iter in range(iters):
        batches = [gen_batch(gen, bs, iter) for gen in input_gen_list]
        for i, batch in enumerate(batches):
            pipe.feed_input(f"input_{i}", batch)
        outputs = pipe.run()
        # Baseline: evaluate the plain Python function sample-by-sample.
        baseline_outputs = []
        for inputs_i in zip(*batches):
            outputs_i = function(*inputs_i)
            # Normalize single outputs to 1-tuples for uniform handling.
            if not isinstance(outputs_i, tuple):
                outputs_i = (outputs_i,)
            baseline_outputs.append(outputs_i)
        # Repack list of tuples into tuple of lists.
        baseline_outputs = tuple(zip(*baseline_outputs))
        baseline_outputs = (list(baseline) for baseline in baseline_outputs)
        for out, baseline in zip(outputs, baseline_outputs):
            check_batch(out, baseline, bs)
# Tests below are ported from dali/test/python/autograph/converters/test_control_flow.py


@params(*num_gens)
def test_basic(num_gen):
    """Different pre-initialized variables assigned in each branch."""

    def f(n):
        a = np.int32(0)
        b = np.int32(0)
        if n > 0:
            a = -n
        else:
            b = 2 * n
        return a, b

    generic_execute(f, [num_gen])


@params(*num_gens)
def test_complex_outputs(num_gen):
    """Branches that mutate attributes of an object instead of plain locals."""

    class DataClass(object):
        def __init__(self, a, b):
            self.a = a
            self.b = b

    def f(n, obj):
        obj.a = np.int32(0)
        obj.b = np.int32(0)
        if n > 0:
            obj.a = -n
        else:
            obj.b = 2 * n
        return obj.a, obj.b

    generic_execute(lambda input: f(input, DataClass(np.int32(0), np.int32(0))), [num_gen])


@params(*num_gens)
def test_single_output(num_gen):
    """Single variable conditionally reassigned, no `else` branch."""

    def f(n):
        if n > 0:
            n = -n
        return n

    generic_execute(f, [num_gen])


@params(*num_gens)
def test_unbalanced(num_gen):
    """Only the `if` branch assigns; the input value flows through otherwise."""

    def f(n):
        if n > 0:
            n = np.int32(3)
        return n

    generic_execute(f, [num_gen])


@params(*num_gens)
def test_local_var(num_gen):
    """Branch-local temporary used to compute the branch result."""

    def f(n):
        if n > 0:
            b = np.int32(4)
            n = b + 1
        return n

    generic_execute(f, [num_gen])


@params(*num_gens)
def test_local_remains_local(num_gen):
    """Branch-local variable must not leak outside the branch."""

    def f(n):
        if n > 0:
            b = np.int32(4)
            n = b + 1
        return n

    generic_execute(f, [num_gen])


@params(*num_gens)
def test_no_outputs(num_gen):
    """Branch with only a dead local assignment - nothing is merged."""

    def f(n):
        if n > 0:
            b = np.int32(4)  # pylint:disable=unused-variable # noqa: F841
        return n

    generic_execute(f, [num_gen])


@params(*num_gens)
def test_created_outputs(num_gen):
    """Variable created (not pre-initialized) in both branches."""

    def f(i):
        if i == 0:
            result = i - 1
        else:
            result = i + 1
        return result

    generic_execute(f, [num_gen])
# Simple cases, where we produce a new data node in the branch.


@params(*num_gens)
def test_one_branch_new_node(num_gen):
    """New node produced only in the `if` branch, with a default fallback value."""

    def f(n):
        result = n * 0
        if n >= 0:
            result = n + 10
        return result

    generic_execute(f, [num_gen])


@params(*num_gens)
def test_both_branches_new_node(num_gen):
    """New node produced in each of the two branches."""

    def f(n):
        if n >= 0:
            result = n + 10
        else:
            result = n - 10
        return result

    generic_execute(f, [num_gen])


@params(*num_gens)
def test_chain_branches_new_node(num_gen):
    """`if/elif/else` chain where every branch produces a new node."""

    def f(n):
        if n == 0:
            result = n + 10
        elif n > 0:
            result = n + 100
        else:
            result = n - 50
        return result

    generic_execute(f, [num_gen])
# Cases where we do only assignment and no new node is produced within a branch, so usage
# must be detected in another way than looking at operator inputs.


@params(*pred_gens)
def test_one_branch_only_assign(pred):
    """`if` branch only rebinds an existing node, producing no new operator."""

    def f(pred, base, true_branch):
        result = base
        if pred:
            result = true_branch
        return result

    generic_execute(f, [pred, lambda _: np.int32(42), lambda _: np.int32(7)])


@params(*pred_gens)
def test_both_branches_only_assign(pred):
    """Both branches only rebind pre-existing nodes."""

    def f(pred, true_branch, false_branch):
        if pred:
            result = true_branch
        else:
            result = false_branch
        return result

    generic_execute(f, [pred, lambda _: np.int32(6), lambda _: np.int32(9)])


@params(*itertools.product(pred_gens, pred_gens))
def test_chain_branches_only_assign(pred_1, pred_2):
    """`if/elif/else` chain where every branch only rebinds pre-existing nodes."""

    def f(pred_1, pred_2, true_branch, elif_branch, else_branch):
        if pred_1:
            result = true_branch
        elif pred_2:
            result = elif_branch
        else:
            result = else_branch
        return result

    generic_execute(
        f, [pred_1, pred_2, lambda _: np.int32(42), lambda _: np.int32(6), lambda _: np.int32(9)])
# More ifs - nesting and sequences.


@params(*itertools.product(["cpu", "gpu"], input_gens, pred_gens, pred_gens))
def test_consecutive(dev, input, pred_0, pred_1):
    """Two independent `if/else` statements executed one after the other."""

    def f(input, pred_0, pred_1):
        if pred_0:
            output = input + 1
        else:
            output = input + 2
        if pred_1:
            output2 = output + 3
        else:
            output2 = output + 4
        return output, output2

    generic_execute(f, [input, pred_0, pred_1], [{"device": dev}, {}, {}])


@params(*itertools.product(["cpu", "gpu"], input_gens, pred_gens, pred_gens))
def test_nested(dev, input, pred_0, pred_1):
    """`if` nested inside another `if`, with nodes produced at both levels."""

    def f(input, pred_0, pred_1):
        if pred_0:
            if pred_1:
                output = input + 10
            else:
                output = input + 200
        else:
            output = input + 3000
        return output

    generic_execute(f, [input, pred_0, pred_1], [{"device": dev}, {}, {}])


@params(*itertools.product(["cpu", "gpu"], input_gens, pred_gens, pred_gens))
def test_nested_with_assignment(dev, input, pred_0, pred_1):
    """Nested `if` where the inner `else` only rebinds an outer-scope node."""

    def f(input, pred_0, pred_1):
        to_assign = input * -5
        if pred_0:
            if pred_1:
                output = input + 10
            else:
                output = to_assign
        else:
            output = input + 3000
        return output

    generic_execute(f, [input, pred_0, pred_1], [{"device": dev}, {}, {}])


@params(*itertools.product(["cpu", "gpu"], input_gens, num_gens))
def test_multiple_nests(dev, input, num):
    """Deeply nested `if` ladder - each level uses the same predicate value."""

    def f(input, num):
        if num == -2:
            if num == -1:
                if num == 0:
                    if num == 1:
                        if num == 2:
                            if num > 3:
                                output = input - 100
                            else:
                                output = input + 100
                        else:
                            output = input - 200
                    else:
                        output = input + 400
                else:
                    output = input - 800
            else:
                output = input + 1600
        else:
            output = input - 3200
        return output

    generic_execute(f, [input, num], [{"device": dev}, {}])
# Compare pure Split/Merge operators with the `if` statement.
def _impl_against_split_merge(base_additional_kwargs=None, conditional_additional_kwargs=None):
    """Compare a hand-built split/merge graph with the equivalent `if/else` pipeline.

    Parameters
    ----------
    base_additional_kwargs, conditional_additional_kwargs : dict, optional
        Extra pipeline kwargs for the baseline / conditional pipeline (e.g. debug mode).
    """
    # Avoid mutable default arguments; None means "no extra kwargs".
    base_additional_kwargs = base_additional_kwargs or {}
    conditional_additional_kwargs = conditional_additional_kwargs or {}
    test_data_root = get_dali_extra_path()
    caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
    bs = 10
    iters = 5
    kwargs = {"batch_size": bs, "num_threads": 4, "device_id": 0, "seed": 42}

    @experimental.pipeline_def(**kwargs, **base_additional_kwargs)
    def regular_pipe():
        encoded, _ = fn.readers.caffe(path=caffe_db_folder, seed=7)
        decoded = fn.decoders.image(encoded, device="mixed")
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=8)
        true, false = fn._conditional.split(decoded, predicate=pred)
        output_true = fn.rotate(true, angle=30)
        output_false = fn.flip(false, horizontal=True)
        return fn._conditional.merge(output_true, output_false, predicate=pred)

    @experimental.pipeline_def(enable_conditionals=True, **kwargs, **conditional_additional_kwargs)
    def conditional_pipe():
        encoded, _ = fn.readers.caffe(path=caffe_db_folder, seed=7)
        decoded = fn.decoders.image(encoded, device="mixed")
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=8)
        if pred:
            output = fn.rotate(decoded, angle=30)
        else:
            output = fn.flip(decoded, horizontal=True)
        return output

    pipes = [regular_pipe(), conditional_pipe()]
    for pipe in pipes:
        pipe.build()
    compare_pipelines(*pipes, bs, iters)


def test_against_split_merge():
    _impl_against_split_merge()
# Compare pure Split/Merge operators with the `if` statement to see if DataNodes produced
# by `.gpu()` are registered.
def _impl_dot_gpu(base_additional_kwargs=None, conditional_additional_kwargs=None):
    """Verify that `.gpu()` transfers inside branches are tracked by the condition stack."""
    # Avoid mutable default arguments; None means "no extra kwargs".
    base_additional_kwargs = base_additional_kwargs or {}
    conditional_additional_kwargs = conditional_additional_kwargs or {}
    test_data_root = get_dali_extra_path()
    caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
    bs = 10
    iters = 5
    kwargs = {"batch_size": bs, "num_threads": 4, "device_id": 0, "seed": 42}

    @experimental.pipeline_def(**kwargs, **base_additional_kwargs)
    def regular_pipe():
        encoded, _ = fn.readers.caffe(path=caffe_db_folder, seed=1)
        decoded = fn.decoders.image(encoded, device="cpu")
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=2)
        true, false = fn._conditional.split(decoded, predicate=pred)
        output_true = fn.rotate(true.gpu(), angle=30)
        output_false = fn.flip(false, horizontal=True).gpu()
        return fn._conditional.merge(output_true, output_false, predicate=pred)

    @experimental.pipeline_def(enable_conditionals=True, **kwargs, **conditional_additional_kwargs)
    def conditional_pipe():
        encoded, _ = fn.readers.caffe(path=caffe_db_folder, seed=1)
        decoded = fn.decoders.image(encoded)
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=2)
        if pred:
            decoded_gpu_true = decoded.gpu()
            # The `decoded` will be split as we look it up in a scope of a branch,
            # so the new node is built based on that split batch.
            if not conditional_additional_kwargs:
                assert "__Split" in decoded_gpu_true.name
            output = fn.rotate(decoded_gpu_true, angle=30)
        else:
            output = fn.flip(decoded, name="flip_in_else", horizontal=True)
            output = output.gpu()
            # Here we create a new node based on one already produced in this scope,
            # so the source name is kept.
            if not conditional_additional_kwargs:
                assert output.name == "flip_in_else"
        return output

    pipes = [regular_pipe(), conditional_pipe()]
    for pipe in pipes:
        pipe.build()
    compare_pipelines(*pipes, bs, iters)


def test_dot_gpu():
    _impl_dot_gpu()
# Test if operators without positional inputs but with argument inputs are correctly
# handled in the split/merge - so they are tracked in the local scope.
def _impl_arg_inputs_scoped_tracking(global_additional_kwargs=None, scoped_additional_kwargs=None):
    """Argument-input-only operators created inside a branch must be scope-tracked."""
    # Avoid mutable default arguments; None means "no extra kwargs".
    global_additional_kwargs = global_additional_kwargs or {}
    scoped_additional_kwargs = scoped_additional_kwargs or {}
    test_data_root = get_dali_extra_path()
    caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
    bs = 10
    iters = 5
    kwargs = {"batch_size": bs, "num_threads": 4, "device_id": 0, "seed": 42}

    @experimental.pipeline_def(enable_conditionals=True, **kwargs, **global_additional_kwargs)
    def global_transform_pipe():
        encoded, _ = fn.readers.caffe(path=caffe_db_folder)
        decoded = fn.decoders.image(encoded, device="mixed")
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=6)
        angle = fn.random.uniform(values=[10, 20, 30], seed=7)
        rotate_transform = fn.transforms.rotation(angle=angle)
        if pred:
            output = fn.warp_affine(decoded, matrix=rotate_transform)
        else:
            output = decoded
        return output

    @experimental.pipeline_def(enable_conditionals=True, **kwargs, **scoped_additional_kwargs)
    def scoped_transform_pipe():
        encoded, _ = fn.readers.caffe(path=caffe_db_folder)
        decoded = fn.decoders.image(encoded, device="mixed")
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL, seed=6)
        angle = fn.random.uniform(values=[10, 20, 30], seed=7)
        if pred:
            # This is the crux of the test: transforms.rotation has no positional inputs,
            # but it has a DataNode argument input - it should be detected as produced
            # in this scope.
            rotate_transform = fn.transforms.rotation(angle=angle)
            output = fn.warp_affine(decoded, matrix=rotate_transform)
        else:
            output = decoded
        return output

    pipes = [global_transform_pipe(), scoped_transform_pipe()]
    for pipe in pipes:
        pipe.build()
    compare_pipelines(*pipes, bs, iters)


def test_arg_inputs_scoped_tracking():
    _impl_arg_inputs_scoped_tracking()
def _impl_arg_inputs_scoped_uninitialized(additional_kwargs=None):
    """A node created via argument inputs in one branch must be branch-local -
    returning it should raise an uninitialized-variable error."""
    # Avoid mutable default arguments; None means "no extra kwargs".
    additional_kwargs = additional_kwargs or {}
    test_data_root = get_dali_extra_path()
    caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
    bs = 10
    kwargs = {"batch_size": bs, "num_threads": 4, "device_id": 0}

    @experimental.pipeline_def(enable_conditionals=True, **kwargs, **additional_kwargs)
    def scoped_transform_pipe():
        encoded, _ = fn.readers.caffe(path=caffe_db_folder)
        decoded = fn.decoders.image(encoded, device="mixed")
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL)
        angle = fn.random.uniform(values=[10, 20, 30])
        if pred:
            rotate_transform = fn.transforms.rotation(angle=angle)
            output = fn.warp_affine(decoded, matrix=rotate_transform)
        else:
            output = decoded
        # Check that rotate_transform is indeed local to the branch by trying to return
        # it, which should generate an uninitialized-variable error.
        return output, rotate_transform

    with assert_raises(
            RuntimeError,
            glob=("Encountered inconsistent outputs out of the `if/else` control flow"
                  " statement. Variables need to be initialized in every code path"
                  " (both `if` branches). Variable 'rotate_transform' must also be"
                  " initialized in the `else` branch.")):
        pipe = scoped_transform_pipe()
        pipe.build()
        pipe.run()


def test_arg_inputs_scoped_uninitialized():
    _impl_arg_inputs_scoped_uninitialized()
# Unified return tests - TODO(klecki)


# Generator tests; remove the random predicate to test the same predicate in both pipelines.
# NOTE: the stray `@params` decorator that used to sit on this `_impl_` helper was removed -
# parametrization belongs on `test_generators` below, which invokes this helper directly.
def _impl_generators(pred, base_additional_kwargs=None, conditional_additional_kwargs=None):
    """Check that source operators (readers, RNG) used inside a branch run in the top
    scope and are split for the particular nesting level."""
    # Avoid mutable default arguments; None means "no extra kwargs".
    base_additional_kwargs = base_additional_kwargs or {}
    conditional_additional_kwargs = conditional_additional_kwargs or {}
    test_data_root = get_dali_extra_path()
    caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
    bs = 10
    iters = 5
    kwargs = {"batch_size": bs, "num_threads": 4, "device_id": 0, "seed": 42}

    @experimental.pipeline_def(**kwargs, **base_additional_kwargs)
    def baseline_pipe():
        encoded, _ = fn.readers.caffe(path=caffe_db_folder, seed=10)
        rand = fn.random.uniform(seed=11)
        predicate = fn.external_source(source=pred, batch=False)
        true_encoded, _ = fn._conditional.split(encoded, predicate=predicate)
        true_rand, _ = fn._conditional.split(rand, predicate=predicate)
        # TODO(klecki): Debug mode currently requires explicit constants instantiation.
        if base_additional_kwargs:
            u8_zeros = types.Constant(np.uint8([0]), device="cpu")
            f32_zeros = types.Constant(np.float32(0.), device="cpu")
        else:
            u8_zeros = np.uint8([0])
            f32_zeros = np.float32(0.)
        _, false_u8 = fn._conditional.split(u8_zeros, predicate=predicate)
        _, false_f32 = fn._conditional.split(f32_zeros, predicate=predicate)
        encoded_out = fn._conditional.merge(true_encoded, false_u8, predicate=predicate)
        rand_out = fn._conditional.merge(true_rand, false_f32, predicate=predicate)
        return encoded_out, rand_out

    @experimental.pipeline_def(enable_conditionals=True, **kwargs, **conditional_additional_kwargs)
    def conditional_pipe():
        predicate = fn.external_source(source=pred, batch=False)
        # Generators work by running in top scope and splitting for particular nesting.
        if predicate:
            encoded_out, _ = fn.readers.caffe(path=caffe_db_folder, seed=10)
            rand_out = fn.random.uniform(seed=11)
        else:
            encoded_out = types.Constant(np.uint8([0]), device="cpu")
            rand_out = types.Constant(np.float32(0.), device="cpu")
        return encoded_out, rand_out

    pipes = [baseline_pipe(), conditional_pipe()]
    for pipe in pipes:
        pipe.build()
    compare_pipelines(*pipes, bs, iters)


@params(*(pred_gens[:-1]))
def test_generators(pred):
    _impl_generators(pred)
# Mismatched branches test (uninitialized values).
def _impl_uninitialized(additional_kwargs=None):
    """Variables (or returns) present only in one branch must raise a descriptive error."""
    # Avoid mutable default arguments; None means "no extra kwargs".
    additional_kwargs = additional_kwargs or {}
    bs = 10
    kwargs = {
        "batch_size": bs,
        "num_threads": 4,
        "device_id": 0,
    }

    @experimental.pipeline_def(enable_conditionals=True, **kwargs, **additional_kwargs)
    def one_branch():
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL)
        if pred:
            output = fn.random.uniform()
        return output

    with assert_raises(
            RuntimeError,
            glob=("Encountered inconsistent outputs out of the `if/else` control flow"
                  " statement. Variables need to be initialized in every code path"
                  " (both `if` branches). Variable 'output' must also be initialized"
                  " in the `else` branch.")):
        p = one_branch()
        p.build()
        p.run()

    @experimental.pipeline_def(enable_conditionals=True, **kwargs)
    def one_return():
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL)
        if pred:
            return fn.random.uniform()

    with assert_raises(
            RuntimeError,
            glob=("Encountered inconsistent outputs out of the `if/else` control flow"
                  " statement. Variables need to be initialized in every code path"
                  " (both `if` branches). The `else` branch must also have a return"
                  " statement.")):
        p = one_return()
        p.build()
        p.run()


def test_uninitialized():
    _impl_uninitialized()
def _tensor_arg_permute_batch_params():
batch_sizes = [1, 5, 8]
inp0 = [[np.full((2, 2), i, dtype=np.float32) for i in range(batch_size)]
for batch_size in batch_sizes]
mask_batches = [
np.array([i % 2 for i in range(batch_size)], dtype=bool) for batch_size in batch_sizes
]
kwarg_batches = [np.array([pred for pred in mask], dtype=np.int32) for mask in mask_batches]
return (inp0, ), mask_batches, {'indices': kwarg_batches}
def _tensor_arg_transform_per_dim_params(arg_name):
def inner():
batch_sizes = [5, 1, 2, 8]
mask_batches = [
np.array([i % 2 for i in range(batch_size)], dtype=bool) for batch_size in batch_sizes
]
kwarg_batches = [
np.array([[pred, pred] for pred in mask], dtype=np.float32) for mask in mask_batches
]
return tuple(), mask_batches, {arg_name: kwarg_batches}
return inner
def _tensor_arg_rotate_params():
batch_sizes = [3, 1, 2, 4]
mask_batches = [
np.array([i % 2 for i in range(batch_size)], dtype=bool) for batch_size in batch_sizes
]
kwarg_batches = [
np.array([10 + 45 * pred for pred in mask], dtype=np.float32) for mask in mask_batches
]
return tuple(), mask_batches, {'angle': kwarg_batches}
def _tensor_arg_roi_random_crop_params():
batch_sizes = [1, 2, 7, 3]
crop_shape = [[
np.array([100 * i + 50, 200 * i + 50, 3], dtype=np.int32) for i in range(batch_size)
] for batch_size in batch_sizes]
roi_start = [[
np.array([sample[0] // 2, sample[1] // 2, sample[2]], dtype=np.int32) for sample in batch
] for batch in crop_shape]
mask_batches = [
np.array([i % 2 for i in range(batch_size)], dtype=bool) for batch_size in batch_sizes
]
return tuple(), mask_batches, {
'crop_shape': crop_shape,
'roi_start': roi_start,
'roi_end': crop_shape
}
def _tensor_arg_shape_kwarg():
batch_sizes = [1, 2, 3, 16, 5]
shape = [[np.array([1 + 3 * i, 2 * (i + 1) - 1], dtype=np.int32) for i in range(batch_size)]
for batch_size in batch_sizes]
mask_batches = [
np.array([i % 2 for i in range(batch_size)], dtype=bool) for batch_size in batch_sizes
]
return tuple(), mask_batches, {'shape': shape}
# Test operators that infer their batch sizes from the tensor argument inputs.
@params(fn.permute_batch, fn.roi_random_crop, fn.transforms.crop, fn.transforms.scale,
        fn.transforms.shear, fn.transforms.translation, fn.transforms.rotation,
        fn.random.uniform, fn.random.normal, fn.random.coin_flip)
def test_named_tensor_arguments(op):
    """Run `op` on both halves of a split batch with split tensor arguments and merge."""
    ops2params = {
        fn.permute_batch: _tensor_arg_permute_batch_params,
        fn.roi_random_crop: _tensor_arg_roi_random_crop_params,
        fn.transforms.crop: _tensor_arg_transform_per_dim_params('from_start'),
        fn.transforms.scale: _tensor_arg_transform_per_dim_params('scale'),
        fn.transforms.shear: _tensor_arg_transform_per_dim_params('angles'),
        fn.transforms.translation: _tensor_arg_transform_per_dim_params('offset'),
        fn.transforms.rotation: _tensor_arg_rotate_params,
        fn.random.uniform: _tensor_arg_shape_kwarg,
        fn.random.normal: _tensor_arg_shape_kwarg,
        fn.random.coin_flip: _tensor_arg_shape_kwarg,
    }

    def dummy_source(batches):
        # External-source callback that yields the prepared batches in order.
        def cb():
            for batch in batches:
                yield batch
        return cb

    def get_pipeline(op, args_batches, mask_batches, kwargs_batches, num_threads=4, device_id=0):
        max_batch_size = max(len(batch) for batch in mask_batches)

        @pipeline_def(batch_size=max_batch_size, num_threads=num_threads, device_id=device_id)
        def split_pipeline():
            args = [fn.external_source(dummy_source(arg_batches))
                    for arg_batches in args_batches]
            mask = fn.external_source(dummy_source(mask_batches))
            kwargs = {
                kwarg_name: fn.external_source(dummy_source(batches))
                for kwarg_name, batches in kwargs_batches.items()
            }
            # Split every positional and named argument with the same predicate.
            kwargs_split = {
                kwarg_name: fn._conditional.split(batch, predicate=mask)
                for kwarg_name, batch in kwargs.items()
            }
            split_args = [fn._conditional.split(arg, predicate=mask) for arg in args]
            left_args = [left_arg for left_arg, _ in split_args]
            right_args = [right_arg for _, right_arg in split_args]
            left = op(
                *left_args,
                **{kwarg_name: left_kwarg
                   for kwarg_name, (left_kwarg, _) in kwargs_split.items()})
            right = op(
                *right_args, **{
                    kwarg_name: right_kwarg
                    for kwarg_name, (_, right_kwarg) in kwargs_split.items()
                })
            batch = fn._conditional.merge(left, right, predicate=mask)
            return batch

        return split_pipeline()

    args_batches, mask_batches, kwargs_batches = ops2params[op]()
    pipe = get_pipeline(op=op, args_batches=args_batches, mask_batches=mask_batches,
                        kwargs_batches=kwargs_batches)
    pipe.build()
    # One run per prepared batch; this validates execution, not output values.
    for _ in range(len(mask_batches)):
        pipe.run()
def test_error_condition():
    """GPU-placed or non-scalar conditions must be rejected with clear errors."""
    kwargs = {
        "enable_conditionals": True,
        "batch_size": 10,
        "num_threads": 4,
        "device_id": 0,
    }

    @experimental.pipeline_def(**kwargs)
    def gpu_condition():
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL)
        # We have to create a new, pure GPU node, otherwise we still find the CPU node.
        if pred.gpu() | False:
            output = np.array(1)
        else:
            output = np.array(0)
        return output

    # TODO(klecki): Extend the error checking so we can provide a better error message here.
    with assert_raises(
            RuntimeError,
            glob=("Named arguments inputs to operators must be CPU data nodes."
                  " However, a GPU data node was provided")):
        pipe = gpu_condition()
        pipe.build()
        print(pipe.run())

    @experimental.pipeline_def(**kwargs)
    def non_scalar_condition():
        pred = fn.random.coin_flip(dtype=types.DALIDataType.BOOL)
        # Stacking yields a 1-d tensor, which is not a valid condition.
        stacked = fn.stack(pred, pred)
        if stacked:
            output = np.array(1)
        else:
            output = np.array(0)
        return output

    with assert_raises(
            RuntimeError,
            glob=("Conditions inside `if` statements are restricted to scalar"
                  " (0-d tensors) inputs, that are placed on CPU."
                  " Got a 1-d input as a condition of the `if` statement.")):
        pipe = non_scalar_condition()
        pipe.build()
        pipe.run()
# All scalar types that the conditional machinery should coerce to a boolean predicate.
boolable_types = [
    bool, np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16, np.int32, np.int64,
    np.float16, np.float32, np.float64
]


@params(*boolable_types)
def test_predicate_any_type(input_type):
    """Non-bool scalar predicates should work via truthiness (non-zero is true)."""
    batch_size = 10
    kwargs = {
        "enable_conditionals": True,
        "batch_size": batch_size,
        "num_threads": 4,
        "device_id": 0,
    }

    def get_truthy_falsy(sample_info):
        # First half of the batch yields a truthy value (7), second half falsy (0).
        if sample_info.idx_in_batch < batch_size / 2:
            return np.array(7, dtype=input_type)
        else:
            return np.array(0, dtype=input_type)

    @experimental.pipeline_def(**kwargs)
    def non_bool_predicate():
        predicate = fn.external_source(source=get_truthy_falsy, batch=False)
        if predicate:
            output = types.Constant(np.array(42), device="cpu")
        else:
            output = types.Constant(np.array(0), device="cpu")
        return output

    pipe = non_bool_predicate()
    pipe.build()
    batch, = pipe.run()
    target = [42 if i < batch_size / 2 else 0 for i in range(batch_size)]
    check_batch(batch, target)
def test_data_node_if_error():
    """Using a DataNode in an `if` without `enable_conditionals=True` must raise."""
    batch_size = 10
    kwargs = {
        "enable_conditionals": False,
        "batch_size": batch_size,
        "num_threads": 4,
        "device_id": 0,
    }

    @pipeline_def(**kwargs)
    def pipeline():
        predicate = fn.random.coin_flip()
        if predicate:
            output = types.Constant(np.array(42), device="cpu")
        else:
            output = types.Constant(np.array(0), device="cpu")
        return output

    with assert_raises(
            TypeError,
            glob="\"DataNode\" was used in conditional context*"
                 " To use conditional execution via `if` statements you need to specify"
                 " `enable_conditionals=True` in `@nvidia.dali.pipeline_def` decorator*"):
        pipe = pipeline()
        pipe.build()
        pipe.run()
def test_sanity_enable_conditionals():
    """`enable_conditionals` passed at call time with the bare `@pipeline_def` form."""
    batch_size = 10
    kwargs = {
        "batch_size": batch_size,
        "num_threads": 4,
        "device_id": 0,
    }

    # Use the no-parentheses decorator variant:
    @pipeline_def
    def pipeline(a, b):
        predicate = fn.random.coin_flip()
        if predicate:
            output = types.Constant(np.array(a), device="cpu")
        else:
            output = types.Constant(np.array(b), device="cpu")
        return output

    # Mix positional arg, conditionals toggle, keyword arg and pipeline kwargs.
    pipe = pipeline(10, enable_conditionals=True, b=4, **kwargs)
    pipe.build()
    pipe.run()
# ============================================================================
# File boundary (concatenation artifact).
# Following content originates from:
#   DALI-main: dali/test/python/conditionals/test_pipeline_conditionals.py
# ============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import itertools
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.types as types
from nose2.tools import params
from nose_utils import assert_raises
from nvidia.dali import pipeline_def
from test_utils import get_dali_extra_path, to_array
# Collect the test videos shipped with DALI_extra (constant and variable frame rate).
filenames = glob.glob(f'{get_dali_extra_path()}/db/video/[cv]fr/*.mp4')
# Filter out HEVC because some GPUs do not support it.
filenames = filter(lambda filename: 'hevc' not in filename, filenames)
# mpeg4 is not yet supported in the CPU operator.
filenames = filter(lambda filename: 'mpeg4' not in filename, filenames)
# Load the encoded files into memory once; they are fed through external_source.
files = [np.fromfile(filename, dtype=np.uint8) for filename in filenames]

# Parameter grids for the tests below.
batch_size_values = [1, 3, 100]
frames_per_sequence_values = [1, 7, 100]
device_values = ['cpu', 'mixed']
def params_generator():
    """
    Generates parameters for tests.

    To be used in the @params decorator:
        @params(*list(params_generator()))

    The pattern of generating the parameters:
    1. Generating a set of all permutations of `batch_size`, `frames_per_sequence`
       and `device`.
    2. Assigning a test file in a round-robin fashion to every permutation of parameters.
    """
    test_params = (
        (dev, fps, bs)
        for dev in device_values
        for fps in frames_per_sequence_values
        for bs in batch_size_values
    )
    # NOTE(review): assumes at least one matching test file exists in DALI_extra;
    # an empty `files` list would cause a modulo-by-zero below - confirm at setup.
    num_test_files = len(files)
    file_idx = 0
    for tp in test_params:
        yield tp + (files[file_idx],)
        file_idx = (file_idx + 1) % num_test_files
@pipeline_def
def video_decoder_pipeline(input_name, device='cpu'):
    """Reference pipeline: decode a whole encoded video fed via external_source."""
    data = fn.external_source(name=input_name, dtype=types.UINT8, ndim=1)
    vid = fn.experimental.decoders.video(data, device=device)
    return vid


@pipeline_def
def video_input_pipeline(input_name, sequence_length, last_sequence_policy='partial',
                         device='cpu'):
    """Pipeline under test: serve the video as fixed-length sequences via inputs.video."""
    vid = fn.experimental.inputs.video(name=input_name, device=device, blocking=False,
                                       sequence_length=sequence_length,
                                       last_sequence_policy=last_sequence_policy)
    return vid


# Parameters common for the DALI pipelines used throughout this test.
common_pipeline_params = {
    'num_threads': 1,
    'device_id': 0,
    'exec_pipelined': False,
    'exec_async': False,
    'prefetch_queue_depth': 1,
}


def get_num_frames(encoded_video):
    """Return the number of frames in `encoded_video` by decoding it once on the CPU."""
    input_name = "VIDEO_INPUT"
    decoder_pipe = video_decoder_pipeline(
        input_name=input_name, batch_size=1, device="cpu", **common_pipeline_params)
    decoder_pipe.build()
    decoder_pipe.feed_input(input_name, [encoded_video])
    decoder_out = decoder_pipe.run()
    # Output shape is (frames, H, W, C); the leading extent is the frame count.
    return decoder_out[0].as_array()[0].shape[0]
def get_batch_outline(num_frames, frames_per_sequence, batch_size):
    """
    Describe how `num_frames` frames split into batches of sequences.

    Returns a 3-tuple:
      * number of full batches (every sequence complete),
      * number of complete sequences in the final, incomplete batch,
      * number of frames in the final, incomplete sequence (0 if none).
    """
    frames_per_batch = frames_per_sequence * batch_size
    full_batches, leftover_frames = divmod(num_frames, frames_per_batch)
    full_sequences, partial_frames = divmod(leftover_frames, frames_per_sequence)
    return full_batches, full_sequences, partial_frames
def portion_out_reference_sequence(decoder_pipe_out, frames_per_sequence, batch_size):
    """
    Generator that takes the output of the VideoDecoder DALI pipeline and, based on the
    provided parameters, serves batch-by-batch the sequences that the VideoInput operator
    is expected to return.

    Yields arrays of shape (batch_size, frames_per_sequence, H, W, C); trailing frames
    that do not fill a whole batch of full sequences are dropped.
    """
    ref_sequence = to_array(decoder_pipe_out[0])[0]
    num_frames = ref_sequence.shape[0]
    n_batches = num_frames // (batch_size * frames_per_sequence)
    # Drop the tail that does not fill a complete batch.
    ref_sequence = ref_sequence[:n_batches * frames_per_sequence * batch_size]
    sh = ref_sequence.shape
    ref_sequence = ref_sequence.reshape(
        n_batches, batch_size, frames_per_sequence, sh[1], sh[2], sh[3])
    for rs in ref_sequence:
        yield rs
@params(*list(params_generator()))
def test_video_input_compare_with_video_decoder(device, frames_per_sequence, batch_size, test_file):
    """
    Compares the VideoInput with the VideoDecoder.

    The whole video decoded at once serves as the reference for the
    sequences returned batch-by-batch by the VideoInput operator.
    """
    input_name = "VIDEO_INPUT"
    decoder_pipe = video_decoder_pipeline(input_name=input_name, batch_size=1, device=device,
                                          **common_pipeline_params)
    input_pipe = video_input_pipeline(input_name=input_name, batch_size=batch_size,
                                      sequence_length=frames_per_sequence, device=device,
                                      **common_pipeline_params)
    decoder_pipe.build()
    decoder_pipe.feed_input(input_name, [test_file])
    decoder_out = decoder_pipe.run()
    input_pipe.build()
    # VideoInput takes a batch of samples, hence the extra nesting.
    input_pipe.feed_input(input_name, np.array([[test_file]]))
    for ref_seq in portion_out_reference_sequence(decoder_out, frames_per_sequence, batch_size):
        input_out = input_pipe.run()
        test_seq = to_array(input_out[0])
        assert np.all(ref_seq == test_seq)
@params(*list(params_generator()))
def test_video_input_partial_vs_pad(device, frames_per_sequence, batch_size, test_video):
    """
    Checks that the 'partial' and 'pad' last_sequence_policy agree on every full
    sequence, and that the padded tail consists of the shared partial frames
    followed by zero-filled frames.
    """
    input_name = "VIDEO_INPUT"
    partial_pipe = video_input_pipeline(input_name=input_name, batch_size=batch_size,
                                        sequence_length=frames_per_sequence, device=device,
                                        last_sequence_policy='partial', **common_pipeline_params)
    pad_pipe = video_input_pipeline(input_name=input_name, batch_size=batch_size,
                                    sequence_length=frames_per_sequence, device=device,
                                    last_sequence_policy='pad', **common_pipeline_params)
    num_frames = get_num_frames(test_video)
    partial_pipe.build()
    partial_pipe.feed_input(input_name, np.array([[test_video]]))
    pad_pipe.build()
    pad_pipe.feed_input(input_name, np.array([[test_video]]))
    num_iterations, num_full_sequences, num_frames_in_partial_sequence = get_batch_outline(
        num_frames, frames_per_sequence, batch_size)
    # First, check all the full batches with full sequences
    for _ in range(num_iterations):
        out1 = partial_pipe.run()
        out2 = pad_pipe.run()
        np.testing.assert_array_equal(to_array(out1[0]), to_array(out2[0]))
    if num_frames - num_iterations * frames_per_sequence * batch_size == 0:
        # Frames have been split equally across batches.
        return
    # Now check the full sequences in the last batch
    partial_out = partial_pipe.run()[0]
    pad_out = pad_pipe.run()[0]
    for i in range(num_full_sequences):
        np.testing.assert_array_equal(to_array(partial_out[i]), to_array(pad_out[i]))
    # And lastly, the actual check PARTIAL vs PAD -
    # the last sequence in the last batch, which might be partial (or padded).
    if num_frames_in_partial_sequence == 0:
        return
    last_partial_sequence = to_array(partial_out[num_full_sequences])
    last_pad_sequence = to_array(pad_out[num_full_sequences])
    for i in range(num_frames_in_partial_sequence):
        # The frames that are in both - partial and padded sequences.
        np.testing.assert_array_equal(last_partial_sequence[i], last_pad_sequence[i])
    frame_shape = last_pad_sequence[0].shape
    empty_frame = np.zeros(frame_shape, dtype=np.uint8)
    for i in range(num_frames_in_partial_sequence, frames_per_sequence):
        # The frames that are only in padded sequence.
        np.testing.assert_array_equal(last_pad_sequence[i], empty_frame)
@params(*itertools.product(device_values, (1, 4)))
def test_video_input_input_queue(device, n_test_files):
    """
    Checks the input queue on `fn.inputs.video` operator.

    Several videos are queued up front; the operator shall serve all their
    sequences in order and then fail with a clear error when depleted.
    """
    input_name = "VIDEO_INPUT"
    batch_size = 3
    frames_per_sequence = 4
    input_pipe = video_input_pipeline(input_name=input_name, batch_size=batch_size,
                                      sequence_length=frames_per_sequence, device=device,
                                      **common_pipeline_params)
    input_pipe.build()
    for i in range(n_test_files):
        input_pipe.feed_input(input_name, np.array([[files[i]]]))
    n_runs = 0
    # Every queued video yields its full batches plus one trailing
    # (possibly partial) batch, if any frames remain.
    for i in range(n_test_files):
        num_frames = get_num_frames(files[i])
        ni, nfs, nfips = get_batch_outline(num_frames, frames_per_sequence, batch_size)
        n_runs += ni + (1 if nfs + nfips > 0 else 0)
    for _ in range(n_runs):
        input_pipe.run()
    # If exception has not been thrown, the test pass.
    with assert_raises(
            RuntimeError,
            glob="No data was provided to the InputOperator. Make sure to feed it properly."):
        input_pipe.run()
|
DALI-main
|
dali/test/python/input/test_video.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import math
import numpy as np
import nvidia.dali.backend
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import os
import random
from nvidia.dali import pipeline_def
from nose_utils import assert_raises
from test_utils import compare_pipelines
from test_utils import get_dali_extra_path
from test_utils import to_array
from test_utils import get_arch
from nose2.tools import params
from nose import SkipTest
def get_img_files(data_path, subdir='*', ext=None):
    """
    Collect image paths matching `data_path`/`subdir`.

    `ext` may be a single extension, a list/tuple of extensions, or None,
    in which case every file except *.txt listings is returned.
    """
    if subdir is None:
        subdir = ''
    if not ext:
        all_files = glob.glob(data_path + f"/{subdir}/*.*")
        listing_files = glob.glob(data_path + f"/{subdir}/*.txt")
        return list(set(all_files) - set(listing_files))
    extensions = ext if isinstance(ext, (list, tuple)) else [ext]
    matched = []
    for e in extensions:
        matched += glob.glob(data_path + f"/{subdir}/*.{e}")
    return matched
@pipeline_def
def decoder_pipe(data_path, device, use_fast_idct=False, jpeg_fancy_upsampling=False):
    """Pipeline reading files from `data_path` and decoding them to RGB."""
    encoded, labels = fn.readers.file(
        file_root=data_path, shard_id=0, num_shards=1, name="Reader")
    images = fn.experimental.decoders.image(
        encoded, device=device, output_type=types.RGB, use_fast_idct=use_fast_idct,
        jpeg_fancy_upsampling=jpeg_fancy_upsampling)
    return images, labels
test_data_root = get_dali_extra_path()
# Images organized per-format; names match the actual format.
good_path = 'db/single'
# Images whose file extension does not match their actual format.
misnamed_path = 'db/single/missnamed'
# Format subdirectories to test under each of the paths above.
test_good_path = {'jpeg', 'mixed', 'png', 'tiff', 'pnm', 'bmp', 'jpeg2k', 'webp'}
test_misnamed_path = {'jpeg', 'png', 'tiff', 'pnm', 'bmp'}
def run_decode(data_path, batch, device, threads):
    """Build a decoding pipeline over `data_path` and run it for one full epoch (smoke test)."""
    pipe = decoder_pipe(data_path=data_path, batch_size=batch, num_threads=threads, device_id=0,
                        device=device, prefetch_queue_depth=1)
    pipe.build()
    # One epoch: enough iterations to cover every file in the dataset once.
    epoch_iters = math.ceil(pipe.epoch_size("Reader") / batch)
    # Loop variable renamed from `iter`, which shadowed the builtin.
    for _ in range(epoch_iters):
        pipe.run()
def test_image_decoder():
    """Yield decoding smoke tests across devices, batch sizes, formats and thread counts."""
    for device in {'cpu', 'mixed'}:
        for batch_size in {1, 10}:
            for img_type in test_good_path:
                # Always test 1 thread plus one randomly picked thread count.
                for threads in {1, random.choice([2, 3, 4])}:
                    data_path = os.path.join(test_data_root, good_path, img_type)
                    yield run_decode, data_path, batch_size, device, threads
            for img_type in test_misnamed_path:
                for threads in {1, random.choice([2, 3, 4])}:
                    data_path = os.path.join(test_data_root, misnamed_path, img_type)
                    yield run_decode, data_path, batch_size, device, threads
@pipeline_def
def create_decoder_slice_pipeline(data_path, device):
    """Return (fused decode+slice output, decode-then-slice output) for comparison."""
    jpegs, _ = fn.readers.file(file_root=data_path, shard_id=0, num_shards=1, name="Reader")
    # The same random ROI feeds both branches, so they must cut identical regions.
    anchor = fn.random.uniform(range=[0.05, 0.15], shape=(2,))
    shape = fn.random.uniform(range=[0.5, 0.7], shape=(2,))
    images_sliced_1 = fn.experimental.decoders.image_slice(
        jpegs, anchor, shape,
        axes=(0, 1),
        device=device, hw_decoder_load=0.7)
    images = fn.experimental.decoders.image(jpegs, device=device, hw_decoder_load=0.7)
    images_sliced_2 = fn.slice(images, anchor, shape, axes=(0, 1))
    return images_sliced_1, images_sliced_2
@pipeline_def
def create_decoder_crop_pipeline(data_path, device):
    """Return (fused decode+crop output, decode-then-crop output) for comparison."""
    jpegs, _ = fn.readers.file(file_root=data_path, shard_id=0, num_shards=1, name="Reader")
    # Shared random crop position, so both branches cut the same window.
    crop_pos_x = fn.random.uniform(range=[0.1, 0.9])
    crop_pos_y = fn.random.uniform(range=[0.1, 0.9])
    w = 242
    h = 230
    images_crop_1 = fn.experimental.decoders.image_crop(
        jpegs,
        crop=(w, h),
        crop_pos_x=crop_pos_x,
        crop_pos_y=crop_pos_y,
        device=device,
        hw_decoder_load=0.7,)
    images = fn.experimental.decoders.image(jpegs, device=device, hw_decoder_load=0.7)
    images_crop_2 = fn.crop(images, crop=(w, h), crop_pos_x=crop_pos_x, crop_pos_y=crop_pos_y)
    return images_crop_1, images_crop_2
@pipeline_def
def create_decoder_random_crop_pipeline(data_path, device):
    """Return (fused random-crop decode + resize, decode + random_resized_crop) for comparison."""
    # Fixed seed keeps both random crop windows identical across the branches.
    seed = 1234
    jpegs, _ = fn.readers.file(file_root=data_path, shard_id=0, num_shards=1, name="Reader")
    w = 242
    h = 230
    images_random_crop_1 = fn.experimental.decoders.image_random_crop(
        jpegs, device=device, output_type=types.RGB, hw_decoder_load=0.7, seed=seed)
    images_random_crop_1 = fn.resize(images_random_crop_1, size=(w, h))
    images = fn.experimental.decoders.image(jpegs, device=device, hw_decoder_load=0.7)
    images_random_crop_2 = fn.random_resized_crop(images, size=(w, h), seed=seed)
    return images_random_crop_1, images_random_crop_2
def run_decode_fused(test_fun, path, img_type, batch, device, threads, validation_fun):
    """Build the two-branch pipeline produced by `test_fun` and validate both outputs pairwise."""
    data_path = os.path.join(test_data_root, path, img_type)
    pipe = test_fun(
        data_path=data_path, batch_size=batch, num_threads=threads, device_id=0,
        device=device, prefetch_queue_depth=1)
    pipe.build()
    epoch_iters = math.ceil(pipe.epoch_size("Reader") / batch)
    for _ in range(epoch_iters):
        out_1, out_2 = pipe.run()
        for img_1, img_2 in zip(out_1, out_2):
            arr_1 = to_array(img_1)
            arr_2 = to_array(img_2)
            assert validation_fun(arr_1, arr_2), \
                f"{validation_fun.__name__}\nimage: {img_1.source_info()}"
def test_image_decoder_fused():
    """Compare the fused decode+slice/crop/random-crop operators with decode followed by the op."""
    threads = 4
    batch_size = 10
    fused_pipelines = [
        create_decoder_slice_pipeline,
        create_decoder_crop_pipeline,
        create_decoder_random_crop_pipeline,
    ]
    for test_fun in fused_pipelines:
        # before CUDA 11.4 HW decoder API doesn't support ROI so we get slightly different results
        # HW decoder + slice vs fused which in this case is executed by the hybrid backend
        needs_loose_check = (test_fun == create_decoder_random_crop_pipeline
                             or nvidia.dali.backend.GetNvjpegVersion() < 11040)
        if needs_loose_check:
            # random_resized_crop can properly handle border as it has pixels that are cropped out,
            # while plain resize following image_decoder_random_crop cannot do that
            # and must duplicate the border pixels
            # NOTE(review): this returns the *fraction* of pixels closer than 0.5 - a float
            # that is truthy whenever any pixel matches, so the assert in run_decode_fused
            # almost always passes. Confirm whether `np.mean(np.abs(x - y)) < 0.5`
            # was intended instead.
            def mean_close(x, y):
                return np.mean(np.abs(x - y) < 0.5)
            validation_fun = mean_close
        else:
            def mean_close(x, y):
                return np.allclose(x, y)
            validation_fun = mean_close
        for device in {'cpu', 'mixed'}:
            for img_type in test_good_path:
                yield (run_decode_fused, test_fun, good_path, img_type, batch_size,
                       device, threads, validation_fun)
def check_FastDCT_body(batch_size, img_type, device):
    """Check that CPU decoding with use_fast_idct matches the reference decoding within eps."""
    data_path = os.path.join(test_data_root, good_path, img_type)
    reference = decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                             device_id=0, device=device, use_fast_idct=False)
    fast_idct = decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                             device_id=0, device='cpu', use_fast_idct=True)
    # average difference should be no bigger than off-by-3
    compare_pipelines(reference, fast_idct, batch_size=batch_size, N_iterations=3, eps=3)
def test_FastDCT():
    """Yield FastIDCT comparison cases for every device, batch size and image format."""
    for dev in {'cpu', 'mixed'}:
        for bs in {1, 8}:
            for fmt in test_good_path:
                yield check_FastDCT_body, bs, fmt, dev
def check_fancy_upsampling_body(batch_size, img_type, device):
    """Compare decoding with jpeg_fancy_upsampling against the plain CPU reference."""
    data_path = os.path.join(test_data_root, good_path, img_type)
    fancy = decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                         device_id=0, device=device, jpeg_fancy_upsampling=True)
    reference = decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                             device_id=0, device='cpu')
    compare_pipelines(fancy, reference, batch_size=batch_size, N_iterations=3, eps=1)
@params(1, 8)
def test_fancy_upsampling(batch_size):
    """GPU decoding with jpeg_fancy_upsampling must match the CPU reference within eps=1."""
    # Redundant local `from nose import SkipTest` removed - it is already
    # imported at module level.
    if nvidia.dali.backend.GetNvjpegVersion() < 12001:
        raise SkipTest("nvJPEG doesn't support fancy upsampling in this version")
    data_path = os.path.join(test_data_root, good_path, 'jpeg')
    compare_pipelines(
        decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                     device_id=0, device='mixed', jpeg_fancy_upsampling=True),
        decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                     device_id=0, device='cpu'),
        batch_size=batch_size, N_iterations=3, eps=1)
# Batch size shared by the consistency-check pipelines below.
batch_size_test = 16
@pipeline_def(batch_size=batch_size_test, device_id=0, num_threads=4)
def img_decoder_pipe(device, out_type, files):
    """Decode the given `files` on `device`, producing `out_type` images."""
    encoded, _ = fn.readers.file(files=files)
    decoded = fn.experimental.decoders.image(encoded, device=device, output_type=out_type)
    return decoded
def _testimpl_image_decoder_consistency(img_out_type, file_fmt, path, subdir='*', ext=None):
    """Check that the CPU and mixed backends decode the same files identically (within eps)."""
    # Lossy formats (and chroma-subsampled color conversion) get a larger tolerance.
    if (file_fmt == 'jpeg2k' or file_fmt == 'mixed') and img_out_type == types.YCbCr:
        eps = 6
    elif file_fmt == 'jpeg' or file_fmt == 'mixed':
        eps = 4
    else:
        eps = 1
    files = get_img_files(os.path.join(test_data_root, path), subdir=subdir, ext=ext)
    cpu_pipe = img_decoder_pipe("cpu", out_type=img_out_type, files=files)
    gpu_pipe = img_decoder_pipe("mixed", out_type=img_out_type, files=files)
    compare_pipelines(cpu_pipe, gpu_pipe, batch_size=batch_size_test, N_iterations=3, eps=eps)
def test_image_decoder_consistency():
    """Yield CPU-vs-mixed consistency checks over formats and output color types."""
    multichannel_cases = [
        ("tiff", "db/single/multichannel/tiff_multichannel", 'tif'),
        ("jpeg2k", "db/single/multichannel/with_alpha", 'jp2'),
        ("png", "db/single/multichannel/with_alpha", 'png'),
    ]
    for out_img_type in [types.RGB, types.BGR, types.YCbCr, types.GRAY, types.ANY_DATA]:
        for file_fmt in test_good_path:
            if (file_fmt == 'jpeg2k' or file_fmt == 'mixed') and out_img_type == types.ANY_DATA:
                # TODO(staniewzki, michalz) Fix ANY_DATA support in OpenCV
                continue
            path = os.path.join(good_path, file_fmt)
            yield _testimpl_image_decoder_consistency, out_img_type, file_fmt, path
        for file_fmt, path, ext in multichannel_cases:
            subdir = None  # In those paths the images are not organized in subdirs
            if (file_fmt == 'jpeg2k' or file_fmt == 'mixed') and out_img_type == types.ANY_DATA:
                # TODO(michalz) Fix ANY_DATA support in OpenCV
                continue
            yield _testimpl_image_decoder_consistency, out_img_type, file_fmt, path, subdir, ext
def _testimpl_image_decoder_tiff_with_alpha_16bit(device, out_type, path, ext):
    """Decode 16-bit images with an alpha channel and verify the output channel count."""
    @pipeline_def(batch_size=1, device_id=0, num_threads=1)
    def pipe(device, out_type, files):
        encoded, _ = fn.readers.file(files=files)
        decoded = fn.experimental.decoders.image(encoded, device=device, output_type=out_type)
        peeked_shape = fn.experimental.peek_image_shape(encoded)
        return decoded, peeked_shape
    files = get_img_files(os.path.join(test_data_root, path), ext=ext, subdir=None)
    pipe = pipe(device, out_type=out_type, files=files)
    pipe.build()
    out, shape = pipe.run()
    if device == 'mixed':
        out = out.as_cpu()
    out = np.array(out[0])
    shape = np.array(shape[0])
    # ANY_DATA keeps the alpha channel (4), GRAY collapses to 1, other types drop alpha (3).
    if out_type == types.ANY_DATA:
        expected_channels = 4
    elif out_type == types.GRAY:
        expected_channels = 1
    else:
        expected_channels = 3
    assert out.shape[2] == expected_channels, \
        f"Expected {expected_channels} but got {out.shape[2]}"
def test_image_decoder_tiff_with_alpha_16bit():
    """Yield 16-bit with-alpha decoding checks for every device and output type."""
    path = "db/single/multichannel/with_alpha_16bit"
    # A single tuple of extensions - get_img_files accepts a tuple `ext`.
    exts = ("png", "tiff", "jp2")
    for dev in ['cpu', 'mixed']:
        for out in [types.RGB, types.BGR, types.YCbCr, types.ANY_DATA]:
            yield _testimpl_image_decoder_tiff_with_alpha_16bit, dev, out, path, exts
def _testimpl_image_decoder_crop_error_oob(device):
    """Decoding with a crop window exceeding the image bounds must raise a clear error."""
    file_root = os.path.join(test_data_root, good_path, "jpeg")
    @pipeline_def(batch_size=batch_size_test, device_id=0, num_threads=4)
    def pipe(device):
        encoded, _ = fn.readers.file(file_root=file_root)
        # crop_w=10000 is far beyond any test image width -> out-of-bounds window.
        decoded = fn.experimental.decoders.image_crop(
            encoded,
            crop_w=10000, crop_h=100,
            device=device)
        return decoded
    p = pipe(device)
    p.build()
    assert_raises(RuntimeError, p.run,
                  glob='cropping window*..*..*is not valid for image dimensions*[*x*]')
def test_image_decoder_crop_error_oob():
    """Run the out-of-bounds crop check on both backends."""
    yield _testimpl_image_decoder_crop_error_oob, 'cpu'
    yield _testimpl_image_decoder_crop_error_oob, 'mixed'
def _testimpl_image_decoder_slice_error_oob(device):
    """Decoding with a slice end beyond the image bounds must raise a clear error."""
    file_root = os.path.join(test_data_root, good_path, "jpeg")
    @pipeline_def(batch_size=batch_size_test, device_id=0, num_threads=4)
    def pipe(device):
        encoded, _ = fn.readers.file(file_root=file_root)
        # end=[10000] on axis 1 is far beyond any test image width.
        decoded = fn.experimental.decoders.image_slice(
            encoded,
            device=device,
            end=[10000],
            axes=[1])
        return decoded
    p = pipe(device)
    p.build()
    assert_raises(RuntimeError, p.run,
                  glob='cropping window*..*..*is not valid for image dimensions*[*x*]')
def test_image_decoder_slice_error_oob():
    """Run the out-of-bounds slice check on both backends."""
    yield _testimpl_image_decoder_slice_error_oob, 'cpu'
    yield _testimpl_image_decoder_slice_error_oob, 'mixed'
# TODO(msala) Implement HW decoder in nvJPEG
# def test_pinned_input_hw_decoder():
# file_root = os.path.join(test_data_root, good_path, "jpeg")
# @pipeline_def(batch_size=128, device_id=0, num_threads=4)
# def pipe():
# encoded, _ = fn.readers.file(file_root=file_root)
# encoded_gpu = encoded.gpu()
# # encoded.gpu() should make encoded pinned
# decoded = fn.experimental.decoders.image(encoded, device="mixed")
# return decoded, encoded_gpu
# p = pipe()
# p.build()
# p.run()
def test_tiff_palette():
    """A palette TIFF must decode to the same shape and (almost) same pixels as its RGB twin."""
    normal = os.path.join(test_data_root, good_path, "tiff", "0/cat-300572_640.tiff")
    palette = os.path.join(test_data_root, good_path, "tiff", "0/cat-300572_640_palette.tiff")
    @pipeline_def(batch_size=2, device_id=0, num_threads=1)
    def pipe():
        encoded, _ = fn.readers.file(files=[normal, palette])
        peeked_shapes = fn.experimental.peek_image_shape(encoded)
        decoded = fn.experimental.decoders.image(encoded, device='cpu')
        return decoded, peeked_shapes
    p = pipe()
    p.build()
    imgs, peeked_shapes = p.run()
    assert (peeked_shapes.at(0) == peeked_shapes.at(1)).all(), \
        "Invalid peeked shape of palette TIFF"
    # Per-pixel difference normalized to [0, 1]; small palette quantization error allowed.
    delta = np.abs(imgs.at(0).astype('float') - imgs.at(1).astype('float'))/256
    assert np.quantile(delta, 0.9) < 0.05, "Original and palette TIFF differ significantly"
def _testimpl_image_decoder_peek_shape(name, expected_shape, image_type=types.ANY_DATA,
                                       adjust_orientation=True):
    """Check that peek_image_shape returns `expected_shape` for the image `name`."""
    file = os.path.join(test_data_root, good_path, name)
    @pipeline_def(batch_size=1, device_id=0, num_threads=1)
    def peek_shape_pipeline(file):
        encoded, _ = fn.readers.file(files=[file])
        return fn.experimental.peek_image_shape(
            encoded, image_type=image_type, adjust_orientation=adjust_orientation)
    pipe = peek_shape_pipeline(file)
    pipe.build()
    shape = tuple(np.asarray(pipe.run()[0][0]))
    assert shape == expected_shape, \
        f"Expected shape {expected_shape} but got {shape}"
def test_peek_shape():
    """peek_image_shape must report the correct (H, W, C) for each format/feature combo."""
    tests = [
        ('bmp/0/cat-1245673_640.bmp', (423, 640, 3)),
        ('bmp/0/cat-111793_640_grayscale.bmp', (426, 640, 1)),
        ('jpeg/641/maracas-706753_1280.jpg', (1280, 919, 3)),
        ('jpeg2k/0/cat-3449999_640.jp2', (426, 640, 3)),
        ('tiff/0/cat-300572_640.tiff', (536, 640, 3)),
        ('png/0/cat-3591348_640.png', (427, 640, 3)),
        ('pnm/0/cat-3591348_640.pbm', (427, 640, 1)),
        ('tiff/0/kitty-2948404_640.tiff', (433, 640, 3)),
        ('tiff/0/cat-111793_640_gray.tiff', (475, 640, 1)),
        ('webp/lossless/cat-111793_640.webp', (426, 640, 3)),
        ('jpeg_lossless/0/cat-1245673_640_grayscale_16bit.jpg', (423, 640, 1)),
        ('multichannel/with_alpha/cat-111793_640-alpha.jp2', (426, 640, 4)),
        ('multichannel/with_alpha/cat-111793_640-alpha.png', (426, 640, 4)),
        ('multichannel/tiff_multichannel/cat-111793_640_multichannel.tif', (475, 640, 6)),
    ]
    for name, expected_shape in tests:
        yield _testimpl_image_decoder_peek_shape, name, expected_shape
    # Explicit image_type overrides (shape reported for the requested color space).
    yield _testimpl_image_decoder_peek_shape, \
        'tiff/0/kitty-2948404_640.tiff', (433, 640, 1), types.GRAY, True
    yield _testimpl_image_decoder_peek_shape, \
        'bmp/0/cat-111793_640_grayscale.bmp', (426, 640, 3), types.RGB, True
def is_nvjpeg_lossless_supported(device_id):
    """Lossless JPEG decoding needs an SM60+ GPU and nvJPEG >= 12.2."""
    if get_arch(device_id) < 6.0:
        return False
    return nvidia.dali.backend.GetNvjpegVersion() >= 12020
@params(
    ('cat-1245673_640_grayscale_16bit', types.ANY_DATA, types.UINT16, 16),
    ('cat-3449999_640_grayscale_16bit', types.ANY_DATA, types.UINT16, 16),
    ('cat-3449999_640_grayscale_12bit', types.ANY_DATA, types.UINT16, 12),
    ('cat-3449999_640_grayscale_16bit', types.ANY_DATA, types.FLOAT, 16),
    ('cat-3449999_640_grayscale_12bit', types.ANY_DATA, types.FLOAT, 12),
    ('cat-3449999_640_grayscale_16bit', types.GRAY, types.UINT16, 16),
    ('cat-3449999_640_grayscale_8bit', types.ANY_DATA, types.UINT8, 8),
)
def test_image_decoder_lossless_jpeg(img_name, output_type, dtype, precision):
    """Decode lossless JPEGs on the GPU and compare with the stored .npy references."""
    device_id = 0
    if not is_nvjpeg_lossless_supported(device_id=device_id):
        raise SkipTest('NVJPEG lossless supported on SM60+ capable devices only')
    data_dir = os.path.join(test_data_root, "db/single/jpeg_lossless/0")
    ref_data_dir = os.path.join(test_data_root, "db/single/reference/jpeg_lossless")
    @pipeline_def(batch_size=1, device_id=device_id, num_threads=1)
    def pipe(file):
        encoded, _ = fn.readers.file(files=[file])
        decoded = fn.experimental.decoders.image(
            encoded, device='mixed', dtype=dtype, output_type=output_type)
        return decoded
    p = pipe(data_dir + f'/{img_name}.jpg')
    p.build()
    out, = p.run()
    result = np.array(out[0].as_cpu())
    ref = np.load(ref_data_dir + f'/{img_name}.npy')
    kwargs = {}
    np_dtype = types.to_numpy_type(dtype)
    # References are stored at the source precision; rescale them to the full
    # range of the requested output dtype when those differ.
    max_val = np_dtype(1.0) if dtype == types.FLOAT else np.iinfo(np_dtype).max
    need_scaling = max_val != np_dtype(2**precision-1)
    if need_scaling:
        multiplier = max_val / (2**precision-1)
        ref = (ref * multiplier)
        if dtype != types.FLOAT:
            kwargs['atol'] = 0.5  # possible rounding error
    np.testing.assert_allclose(ref, result, **kwargs)
def test_image_decoder_lossless_jpeg_cpu_not_supported():
    """The CPU backend must reject lossless (SOF-3) JPEGs with a descriptive error."""
    device_id = 0
    if not is_nvjpeg_lossless_supported(device_id=device_id):
        raise SkipTest('NVJPEG lossless supported on SM60+ capable devices only')
    @pipeline_def(batch_size=1, device_id=device_id, num_threads=1)
    def pipe(file):
        encoded, _ = fn.readers.file(files=[file])
        decoded = fn.experimental.decoders.image(
            encoded, device='cpu', dtype=types.UINT16, output_type=types.ANY_DATA)
        return decoded
    imgfile = "db/single/jpeg_lossless/0/cat-1245673_640_grayscale_16bit.jpg"
    p = pipe(os.path.join(test_data_root, imgfile))
    p.build()
    assert_raises(
        RuntimeError, p.run,
        glob='*Failed to decode a JPEG lossless (SOF-3)*Only "mixed" backend*')
|
DALI-main
|
dali/test/python/decoder/test_imgcodec.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import Pipeline, pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import scipy.io.wavfile
import numpy as np
import os
from test_audio_decoder_utils import generate_waveforms, rosa_resample
from test_utils import compare_pipelines, get_files
# Paths of the WAV fixtures synthesized by create_test_files below.
names = [
    "/tmp/dali_test_1C.wav",
    "/tmp/dali_test_2C.wav",
    "/tmp/dali_test_4C.wav"
]
# Sine frequencies (cycles per sample) - one entry per channel of each file.
freqs = [
    np.array([0.02]),
    np.array([0.01, 0.012]),
    np.array([0.01, 0.012, 0.013, 0.014])
]
# Sampling rates [Hz] and lengths [samples] of the respective files.
rates = [16000, 22050, 12347]
lengths = [10000, 54321, 12345]
def create_test_files():
    """Synthesize the WAV fixtures listed in `names` (sine mixes with known rates/lengths)."""
    for i in range(len(names)):
        wave = generate_waveforms(lengths[i], freqs[i])
        # Convert the [-1, 1] float waveform to 16-bit PCM.
        wave = (wave * 32767).round().astype(np.int16)
        scipy.io.wavfile.write(names[i], rates[i], wave)
# Generate the fixtures at import time so every test below can read them.
create_test_files()
# Target sample rates used by the resampling decoders below.
rate1 = 16000
rate2 = 12999
class DecoderPipeline(Pipeline):
    """Pipeline decoding the same raw audio four ways:
    plain, resampled, downmixed, and resampled+downmixed."""

    def __init__(self):
        super().__init__(batch_size=8, num_threads=3, device_id=0,
                         exec_async=True, exec_pipelined=True,
                         output_dtype=[types.INT16, types.INT16, types.INT16, types.FLOAT,
                                       types.FLOAT, types.FLOAT, types.FLOAT, types.FLOAT],
                         output_ndim=[2, 2, 1, 1, 0, 0, 0, 0])
        self.file_source = ops.ExternalSource()
        self.plain_decoder = ops.decoders.Audio(dtype=types.INT16)
        self.resampling_decoder = ops.decoders.Audio(sample_rate=rate1, dtype=types.INT16)
        self.downmixing_decoder = ops.decoders.Audio(downmix=True, dtype=types.INT16)
        self.resampling_downmixing_decoder = ops.decoders.Audio(sample_rate=rate2, downmix=True,
                                                                quality=50, dtype=types.FLOAT)

    def define_graph(self):
        """Outputs: the four decoded signals followed by their four sample rates."""
        self.raw_file = self.file_source()
        dec_plain, rates_plain = self.plain_decoder(self.raw_file)
        dec_res, rates_res = self.resampling_decoder(self.raw_file)
        dec_mix, rates_mix = self.downmixing_decoder(self.raw_file)
        dec_res_mix, rates_res_mix = self.resampling_downmixing_decoder(self.raw_file)
        out = [dec_plain, dec_res, dec_mix, dec_res_mix,
               rates_plain, rates_res, rates_mix, rates_res_mix]
        return out

    def iter_setup(self):
        """Feed a full batch of raw encoded files, round-robin over `names`."""
        # Renamed from `list` - don't shadow the builtin.
        batch = []
        for i in range(self.batch_size):
            idx = i % len(names)
            with open(names[idx], mode="rb") as f:
                batch.append(np.array(bytearray(f.read()), np.uint8))
        self.feed_input(self.raw_file, batch)
def test_decoded_vs_generated():
    """Decode the generated WAVs and compare every decoder variant against
    mathematically re-generated reference waveforms (and librosa resampling)."""
    pipeline = DecoderPipeline()
    pipeline.build()
    idx = 0
    # Loop variable renamed from `iter` (shadowed the builtin); it is unused.
    for _ in range(1):
        out = pipeline.run()
        for i in range(len(out[0])):
            plain = out[0].at(i)
            res = out[1].at(i)
            mix = out[2].at(i)[:, np.newaxis]
            res_mix = out[3].at(i)[:, np.newaxis]
            # Expected lengths after (no) resampling to rate1/rate2.
            ref_len = [0, 0, 0, 0]
            ref_len[0] = lengths[idx]
            ref_len[1] = lengths[idx] * rate1 / rates[idx]
            ref_len[2] = lengths[idx]
            ref_len[3] = lengths[idx] * rate2 / rates[idx]
            ref0 = generate_waveforms(ref_len[0], freqs[idx]) * 32767
            ref1 = generate_waveforms(ref_len[1], freqs[idx] * (rates[idx] / rate1)) * 32767
            ref2 = generate_waveforms(ref_len[2], freqs[idx]) * 32767
            ref2 = ref2.mean(axis=1, keepdims=1)
            ref3 = generate_waveforms(ref_len[3], freqs[idx] * (rates[idx] / rate2))
            ref3 = ref3.mean(axis=1, keepdims=1)
            assert out[4].at(i) == rates[idx]
            assert out[5].at(i) == rate1
            assert out[6].at(i) == rates[idx]
            assert out[7].at(i) == rate2
            # just reading - allow only for rounding
            assert np.allclose(plain, ref0, rtol=0, atol=0.5)
            # resampling - allow for 1e-3 dynamic range error
            assert np.allclose(res, ref1, rtol=0, atol=32767 * 1e-3)
            # downmixing - allow for 2 bits of error
            # - one for quantization of channels, one for quantization of result
            assert np.allclose(mix, ref2, rtol=0, atol=2)
            # resampling with weird ratio - allow for 3e-3 dynamic range error
            assert np.allclose(res_mix, ref3, rtol=0, atol=3e-3)
            rosa_in1 = plain.astype(np.float32)
            rosa1 = rosa_resample(rosa_in1, rates[idx], rate1)
            rosa_in3 = rosa_in1 / 32767
            rosa3 = rosa_resample(rosa_in3.mean(axis=1, keepdims=1), rates[idx], rate2)
            assert np.allclose(res, rosa1, rtol=0, atol=32767 * 1e-3)
            assert np.allclose(res_mix, rosa3, rtol=0, atol=3e-3)
            idx = (idx + 1) % len(names)
# Batch size for the decoder-alias comparison below.
batch_size_alias_test = 16
@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def decoder_pipe(decoder_op, fnames, sample_rate, downmix, quality, dtype):
    """Decode `fnames` with the given decoder operator (used to compare aliased ops)."""
    encoded, _ = fn.readers.file(files=fnames)
    decoded, rates = decoder_op(encoded, sample_rate=sample_rate, downmix=downmix, quality=quality,
                                dtype=dtype)
    return decoded, rates
def check_audio_decoder_alias(sample_rate, downmix, quality, dtype):
    """Verify fn.decoders.audio and its legacy alias fn.audio_decoder produce identical output."""
    pipe_new = decoder_pipe(fn.decoders.audio, names, sample_rate, downmix, quality, dtype)
    pipe_legacy = decoder_pipe(fn.audio_decoder, names, sample_rate, downmix, quality, dtype)
    compare_pipelines(pipe_new, pipe_legacy, batch_size_alias_test, 10)
def test_audio_decoder_alias():
    """Yield alias-equivalence checks over the decoder parameter grid."""
    for rate in (None, 16000, 12999):
        for mix in (False, True):
            for q in (0, 50, 100):
                for dt in (types.INT16, types.INT32, types.FLOAT):
                    yield check_audio_decoder_alias, rate, mix, q, dt
def check_audio_decoder_correctness(fmt, dtype):
    """Decode audio files of format `fmt` and compare sample-exactly against .npy references."""
    batch_size = 16
    niterations = 10
    @pipeline_def(batch_size=batch_size, device_id=0, num_threads=4)
    def audio_decoder_pipe(fnames, dtype, downmix=False):
        encoded, _ = fn.readers.file(files=fnames)
        decoded, _ = fn.decoders.audio(encoded, dtype=dtype, downmix=downmix)
        return decoded
    audio_files = get_files(os.path.join('db', 'audio', fmt), fmt)
    # Each audio file has a sibling .npy file with the reference samples.
    npy_files = [os.path.splitext(fpath)[0] + '.npy' for fpath in audio_files]
    pipe = audio_decoder_pipe(audio_files, dtype)
    pipe.build()
    for it in range(niterations):
        data = pipe.run()
        for s in range(batch_size):
            # The reader cycles through the files; map (iteration, sample) to a file.
            sample_idx = (it * batch_size + s) % len(audio_files)
            ref = np.load(npy_files[sample_idx])
            if len(ref.shape) == 1:
                # Mono references are stored 1-D; decoder output is always 2-D.
                ref = np.expand_dims(ref, 1)
            arr = np.array(data[0][s])
            assert arr.shape == ref.shape
            if fmt == 'ogg':
                # For OGG Vorbis, we consider errors any value that is off by more than 1
                # TODO(janton): There is a bug in libsndfile that produces underflow/overflow.
                # Remove this when the bug is fixed.
                # Tuple with two arrays, we just need the first dimension
                wrong_values = np.where(np.abs(arr - ref) > 1)[0]
                nerrors = len(wrong_values)
                assert nerrors <= 1
                # TODO(janton): Uncomment this when the bug is fixed
                # np.testing.assert_allclose(arr, ref, atol=1)
            else:
                np.testing.assert_equal(arr, ref)
def test_audio_decoder_correctness():
    """Yield a decode-vs-reference check for every supported container format."""
    for fmt in ('wav', 'flac', 'ogg'):
        yield check_audio_decoder_correctness, fmt, types.INT16
|
DALI-main
|
dali/test/python/decoder/test_audio.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import math
import numpy as np
import nvidia.dali.backend
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import os
import random
from nvidia.dali import pipeline_def
from nose2.tools import params
from nose_utils import assert_raises
from test_utils import check_output_pattern
from test_utils import compare_pipelines
from test_utils import get_dali_extra_path
from test_utils import to_array
def get_img_files(data_path, subdir='*', ext=None):
    """
    Collect image paths matching `data_path`/`subdir`.

    `ext` may be a single extension, a list/tuple of extensions, or None,
    in which case every file except *.txt listings is returned.
    """
    if subdir is None:
        subdir = ''
    if not ext:
        all_files = glob.glob(data_path + f"/{subdir}/*.*")
        listing_files = glob.glob(data_path + f"/{subdir}/*.txt")
        return list(set(all_files) - set(listing_files))
    extensions = ext if isinstance(ext, (list, tuple)) else [ext]
    matched = []
    for e in extensions:
        matched += glob.glob(data_path + f"/{subdir}/*.{e}")
    return matched
@pipeline_def
def decoder_pipe(data_path, device, use_fast_idct=False, memory_stats=False,
                 jpeg_fancy_upsampling=False):
    """Pipeline reading files from `data_path` and decoding them to RGB."""
    encoded, labels = fn.readers.file(
        file_root=data_path, shard_id=0, num_shards=1, name="Reader")
    images = fn.decoders.image(
        encoded, device=device, output_type=types.RGB, use_fast_idct=use_fast_idct,
        memory_stats=memory_stats, jpeg_fancy_upsampling=jpeg_fancy_upsampling)
    return images, labels
test_data_root = get_dali_extra_path()
# Images organized per-format; names match the actual format.
good_path = 'db/single'
# Images whose file extension does not match their actual format.
misnamed_path = 'db/single/missnamed'
# Format subdirectories to test under each of the paths above.
test_good_path = {'jpeg', 'mixed', 'png', 'tiff', 'pnm', 'bmp', 'jpeg2k', 'webp'}
test_misnamed_path = {'jpeg', 'png', 'tiff', 'pnm', 'bmp'}
def run_decode(_img_type, data_path, batch, device, threads, memory_stats=False):
    """Run one full epoch of decoding over `data_path`; smoke test (outputs discarded)."""
    pipe = decoder_pipe(data_path=data_path, batch_size=batch, num_threads=threads, device_id=0,
                        device=device, memory_stats=memory_stats, prefetch_queue_depth=1)
    pipe.build()
    iters = math.ceil(pipe.epoch_size("Reader") / batch)
    for _ in range(iters):
        outs = pipe.run()
        del outs
    # Drop the pipeline promptly to release its (GPU) resources.
    del pipe
def test_image_decoder():
    """Nose generator: smoke-test decoding for every format, device and batch
    size, including files with intentionally wrong extensions."""
    for device in {'cpu', 'mixed'}:
        for batch_size in {1, 10}:
            for img_type in test_good_path:
                for threads in {1, random.choice([2, 3, 4])}:
                    data_path = os.path.join(test_data_root, good_path, img_type)
                    yield run_decode, img_type, data_path, batch_size, device, threads
            for img_type in test_misnamed_path:
                for threads in {1, random.choice([2, 3, 4])}:
                    data_path = os.path.join(test_data_root, misnamed_path, img_type)
                    yield run_decode, img_type, data_path, batch_size, device, threads
@pipeline_def
def create_decoder_slice_pipeline(data_path, device):
    """Pipeline comparing fused decode+slice with decode followed by fn.slice,
    using identical random anchors/shapes for both branches."""
    jpegs, _ = fn.readers.file(file_root=data_path, shard_id=0, num_shards=1, name="Reader")
    anchor = fn.random.uniform(range=[0.05, 0.15], shape=(2,))
    shape = fn.random.uniform(range=[0.5, 0.7], shape=(2,))
    images_sliced_1 = fn.decoders.image_slice(jpegs, anchor, shape, device=device,
                                              hw_decoder_load=0.7, output_type=types.RGB,
                                              axes=(0, 1))
    images = fn.decoders.image(jpegs, device=device, hw_decoder_load=0.7, output_type=types.RGB)
    images_sliced_2 = fn.slice(images, anchor, shape, axes=(0, 1))
    return images_sliced_1, images_sliced_2
@pipeline_def
def create_decoder_crop_pipeline(data_path, device):
    """Pipeline comparing fused decode+crop with decode followed by fn.crop,
    using the same random window position for both branches."""
    jpegs, _ = fn.readers.file(file_root=data_path, shard_id=0, num_shards=1, name="Reader")
    crop_pos_x = fn.random.uniform(range=[0.1, 0.9])
    crop_pos_y = fn.random.uniform(range=[0.1, 0.9])
    w = 242
    h = 230
    images_crop_1 = fn.decoders.image_crop(jpegs, device=device, output_type=types.RGB,
                                           hw_decoder_load=0.7, crop=(w, h), crop_pos_x=crop_pos_x,
                                           crop_pos_y=crop_pos_y)
    images = fn.decoders.image(jpegs, device=device, hw_decoder_load=0.7, output_type=types.RGB)
    images_crop_2 = fn.crop(images, crop=(w, h), crop_pos_x=crop_pos_x, crop_pos_y=crop_pos_y)
    return images_crop_1, images_crop_2
@pipeline_def
def create_decoder_random_crop_pipeline(data_path, device):
    """Pipeline comparing fused decode+random_crop (+resize) with decode
    followed by random_resized_crop, sharing the RNG seed."""
    seed = 1234
    jpegs, _ = fn.readers.file(file_root=data_path, shard_id=0, num_shards=1, name="Reader")
    w = 242
    h = 230
    images_random_crop_1 = fn.decoders.image_random_crop(
        jpegs, device=device, output_type=types.RGB, hw_decoder_load=0.7, seed=seed)
    images_random_crop_1 = fn.resize(images_random_crop_1, size=(w, h))
    images = fn.decoders.image(jpegs, device=device, hw_decoder_load=0.7, output_type=types.RGB)
    images_random_crop_2 = fn.random_resized_crop(images, size=(w, h), seed=seed)
    return images_random_crop_1, images_random_crop_2
def run_decode_fused(test_fun, path, img_type, batch, device, threads, validation_fun):
    """Run a fused-vs-reference pipeline for one epoch and validate each output
    pair with `validation_fun`."""
    data_path = os.path.join(test_data_root, path, img_type)
    pipe = test_fun(data_path=data_path, batch_size=batch, num_threads=threads, device_id=0,
                    device=device, prefetch_queue_depth=1)
    pipe.build()
    iters = math.ceil(pipe.epoch_size("Reader") / batch)
    for _ in range(iters):
        out_1, out_2 = pipe.run()
        for img_1, img_2 in zip(out_1, out_2):
            img_1 = to_array(img_1)
            img_2 = to_array(img_2)
            assert validation_fun(img_1, img_2)
def test_image_decoder_fused():
    """Nose generator: fused decoder variants (slice/crop/random_crop) must
    match decode followed by the equivalent standalone operator."""
    threads = 4
    batch_size = 10
    for test_fun in [
        create_decoder_slice_pipeline,
        create_decoder_crop_pipeline,
        create_decoder_random_crop_pipeline
    ]:
        # before CUDA 11.4 HW decoder API doesn't support ROI so we get slightly different results
        # HW decoder + slice vs fused which in this case is executed by the hybrid backend
        if test_fun == create_decoder_random_crop_pipeline or \
                nvidia.dali.backend.GetNvjpegVersion() < 11040:
            # random_resized_crop can properly handle border as it has pixels that are cropped out,
            # while plain resize following image_decoder_random_crop cannot do that
            # and must duplicate the border pixels
            def mean_close(x, y):
                # Cast to a signed type first: the images are uint8 and plain
                # subtraction would wrap around, corrupting the difference.
                # Require the *average* absolute difference to be below 0.5 --
                # previously the comparison sat inside np.mean, which returned
                # the fraction of close pixels and was truthy for almost any
                # pair of images, making the assertion vacuous.
                return np.mean(np.abs(x.astype(int) - y.astype(int))) < 0.5
            validation_fun = mean_close
        else:
            def mean_close(x, y):
                return np.allclose(x, y)
            validation_fun = mean_close
        for device in {'cpu', 'mixed'}:
            for img_type in test_good_path:
                yield run_decode_fused, test_fun, good_path, img_type, batch_size, \
                    device, threads, validation_fun
def check_FastDCT_body(batch_size, img_type, device):
    """Fast-IDCT decoding must stay within eps=3 of the precise CPU decode."""
    data_path = os.path.join(test_data_root, good_path, img_type)
    compare_pipelines(
        decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                     device_id=0, device=device, use_fast_idct=False),
        decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                     device_id=0, device='cpu', use_fast_idct=True),
        # average difference should be no bigger than off-by-3
        batch_size=batch_size, N_iterations=3, eps=3)
def test_FastDCT():
    """Nose generator for the fast-IDCT comparison across formats/devices/batches."""
    for device in {'cpu', 'mixed'}:
        for batch_size in {1, 8}:
            for img_type in test_good_path:
                yield check_FastDCT_body, batch_size, img_type, device
def check_fancy_upsampling_body(batch_size, img_type, device):
    """Fancy chroma upsampling must stay within eps=1 of the CPU reference."""
    data_path = os.path.join(test_data_root, good_path, img_type)
    compare_pipelines(
        decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                     device_id=0, device=device, jpeg_fancy_upsampling=True),
        decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                     device_id=0, device='cpu'),
        batch_size=batch_size, N_iterations=3, eps=1)
@params(1, 8)
def test_fancy_upsampling(batch_size):
    """Mixed-device fancy upsampling vs CPU decode (requires nvJPEG >= 12.1)."""
    if nvidia.dali.backend.GetNvjpegVersion() < 12001:
        from nose import SkipTest
        raise SkipTest("nvJPEG doesn't support fancy upsampling in this version")
    data_path = os.path.join(test_data_root, good_path, 'jpeg')
    compare_pipelines(
        decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                     device_id=0, device='mixed', jpeg_fancy_upsampling=True),
        decoder_pipe(data_path=data_path, batch_size=batch_size, num_threads=3,
                     device_id=0, device='cpu'),
        batch_size=batch_size, N_iterations=3, eps=1)
def test_image_decoder_memory_stats():
    """Nose generator: decoder memory-stat printouts must report the configured
    padding sizes as the largest allocations."""
    device = 'mixed'
    img_type = 'jpeg'

    def check(img_type, size, device, threads):
        data_path = os.path.join(test_data_root, good_path, img_type)
        # largest allocation should match our (in this case) memory padding settings
        # (assuming no reallocation was needed here as the hint is big enough)
        # The alternation must be grouped: a bare `\(pinned|regular\)` splits the
        # WHOLE pattern into two alternatives ("...Host \(pinned" OR
        # "regular\) memory..."), which silently weakened the check.
        pattern = r'Device memory: \d+ allocations, largest = 16777216 bytes\n.*' \
                  r'Host \((pinned|regular)\) memory: \d+ allocations, largest = 8388608 bytes\n'
        with check_output_pattern(pattern):
            run_decode(img_type, data_path, size, device, threads, memory_stats=True)

    for size in {1, 10}:
        for threads in {1, random.choice([2, 3, 4])}:
            yield check, img_type, size, device, threads
# Default batch size for the consistency tests below.
batch_size_test = 16
@pipeline_def(batch_size=batch_size_test, device_id=0, num_threads=4)
def img_decoder_pipe(device, out_type, files):
    """Decode the given files on `device` into `out_type` images."""
    encoded, _ = fn.readers.file(files=files)
    decoded = fn.decoders.image(encoded, device=device, output_type=out_type)
    return decoded
def _testimpl_image_decoder_consistency(img_out_type, file_fmt, path, subdir='*', ext=None):
    """Compare cpu vs mixed decoding of the same files; lossy formats get a larger eps."""
    eps = 1
    if file_fmt == 'jpeg' or file_fmt == 'mixed':
        eps = 4
    if (file_fmt == 'jpeg2k' or file_fmt == 'mixed') and img_out_type == types.YCbCr:
        eps = 6
    files = get_img_files(os.path.join(test_data_root, path), subdir=subdir, ext=ext)
    compare_pipelines(
        img_decoder_pipe("cpu", out_type=img_out_type, files=files),
        img_decoder_pipe("mixed", out_type=img_out_type, files=files),
        batch_size=batch_size_test, N_iterations=3, eps=eps)
def test_image_decoder_consistency():
    """Nose generator: cpu vs mixed consistency for all formats and output
    types, plus a few multichannel datasets."""
    for out_img_type in [types.RGB, types.BGR, types.YCbCr, types.GRAY, types.ANY_DATA]:
        for file_fmt in test_good_path:
            path = os.path.join(good_path, file_fmt)
            yield _testimpl_image_decoder_consistency, out_img_type, file_fmt, path
        for file_fmt, path, ext in [("tiff", "db/single/multichannel/tiff_multichannel", 'tif'),
                                    ("jpeg2k", "db/single/multichannel/with_alpha", 'jp2'),
                                    ("png", "db/single/multichannel/with_alpha", 'png')]:
            subdir = None  # In those paths the images are not organized in subdirs
            yield _testimpl_image_decoder_consistency, out_img_type, file_fmt, path, subdir, ext
def _testimpl_image_decoder_tiff_with_alpha_16bit(device, out_type, path, ext):
    """Decode 16-bit images with an alpha channel and check the channel count."""
    @pipeline_def(batch_size=1, device_id=0, num_threads=1)
    def pipe(device, out_type, files):
        encoded, _ = fn.readers.file(files=files)
        decoded = fn.decoders.image(encoded, device=device, output_type=out_type)
        peeked_shape = fn.peek_image_shape(encoded)
        return decoded, peeked_shape
    files = get_img_files(os.path.join(test_data_root, path), ext=ext, subdir=None)
    pipe = pipe(device, out_type=out_type, files=files)
    pipe.build()
    out, shape = pipe.run()
    if device == 'mixed':
        out = out.as_cpu()
    out = np.array(out[0])
    # NOTE(review): `shape` is converted but never checked below -- dead code?
    shape = np.array(shape[0])
    expected_channels = 4 if out_type == types.ANY_DATA else 1 if out_type == types.GRAY else 3
    assert out.shape[2] == expected_channels, \
        f"Expected {expected_channels} but got {out.shape[2]}"
def test_image_decoder_tiff_with_alpha_16bit():
    """Nose generator over devices/output types for 16-bit images with alpha."""
    for device in ['cpu', 'mixed']:
        for out_type in [types.RGB, types.BGR, types.YCbCr, types.ANY_DATA]:
            path = "db/single/multichannel/with_alpha_16bit"
            # a single tuple of extensions: all three formats are globbed in one go
            for ext in [("png", "tiff", "jp2")]:
                yield _testimpl_image_decoder_tiff_with_alpha_16bit, device, out_type, path, ext
@pipeline_def(batch_size=batch_size_test, device_id=0, num_threads=4)
def decoder_pipe_with_name(decoder_op, file_root, device, use_fast_idct):
    """Decode with the given decoder operator (new API or legacy alias),
    fixed seed so paired runs are comparable."""
    encoded, _ = fn.readers.file(file_root=file_root)
    decoded = decoder_op(encoded, device=device, output_type=types.RGB, use_fast_idct=use_fast_idct,
                         seed=42)
    return decoded
def check_image_decoder_alias(new_op, old_op, file_root, device, use_fast_idct):
    """New-API decoder and its legacy alias must produce identical outputs."""
    new_pipe = decoder_pipe_with_name(new_op, file_root, device, use_fast_idct)
    legacy_pipe = decoder_pipe_with_name(old_op, file_root, device, use_fast_idct)
    compare_pipelines(new_pipe, legacy_pipe, batch_size=batch_size_test, N_iterations=3)
def test_image_decoder_alias():
    """Nose generator over all decoder aliases, devices and fast-IDCT settings."""
    data_path = os.path.join(test_data_root, good_path, "jpeg")
    for new_op, old_op in [(fn.decoders.image, fn.image_decoder),
                           (fn.decoders.image_crop, fn.image_decoder_crop),
                           (fn.decoders.image_random_crop, fn.image_decoder_random_crop)]:
        for device in ["cpu", "mixed"]:
            for use_fast_idct in [True, False]:
                yield check_image_decoder_alias, new_op, old_op, data_path, device, use_fast_idct
@pipeline_def(batch_size=batch_size_test, device_id=0, num_threads=4)
def decoder_slice_pipe(decoder_op, file_root, device, use_fast_idct):
    """Decode-and-slice with a fixed [0, 0]..[0.5, 0.5] relative window."""
    encoded, _ = fn.readers.file(file_root=file_root)
    start = types.Constant(np.array([0., 0.]))
    end = types.Constant(np.array([0.5, 0.5]))
    decoded = decoder_op(encoded, start, end, device=device, output_type=types.RGB,
                         use_fast_idct=use_fast_idct)
    return decoded
def check_image_decoder_slice_alias(new_op, old_op, file_root, device, use_fast_idct):
    """New-API slice decoder and its legacy alias must produce identical outputs."""
    new_pipe = decoder_slice_pipe(new_op, file_root, device, use_fast_idct)
    legacy_pipe = decoder_slice_pipe(old_op, file_root, device, use_fast_idct)
    compare_pipelines(new_pipe, legacy_pipe, batch_size=batch_size_test, N_iterations=3)
def test_image_decoder_slice_alias():
    """Nose generator for the slice-decoder alias across devices and IDCT modes."""
    data_path = os.path.join(test_data_root, good_path, "jpeg")
    new_op, old_op = fn.decoders.image_slice, fn.image_decoder_slice
    for device in ["cpu", "mixed"]:
        for use_fast_idct in [True, False]:
            yield check_image_decoder_slice_alias, new_op, old_op, data_path, device, use_fast_idct
def _testimpl_image_decoder_crop_error_oob(device):
    """An out-of-bounds crop window must raise a RuntimeError at run time."""
    file_root = os.path.join(test_data_root, good_path, "jpeg")
    @pipeline_def(batch_size=batch_size_test, device_id=0, num_threads=4)
    def pipe(device):
        encoded, _ = fn.readers.file(file_root=file_root)
        # crop_w=10000 exceeds every test image's width
        decoded = fn.decoders.image_crop(encoded, device=device, crop_w=10000, crop_h=100)
        return decoded
    p = pipe(device)
    p.build()
    assert_raises(RuntimeError, p.run,
                  glob='cropping window*..*..*is not valid for image dimensions*[*x*]')
def test_image_decoder_crop_error_oob():
    """Nose generator: out-of-bounds crop on both devices."""
    for device in ['cpu', 'mixed']:
        yield _testimpl_image_decoder_crop_error_oob, device
def _testimpl_image_decoder_slice_error_oob(device):
    """An out-of-bounds slice end must raise a RuntimeError at run time."""
    file_root = os.path.join(test_data_root, good_path, "jpeg")
    @pipeline_def(batch_size=batch_size_test, device_id=0, num_threads=4)
    def pipe(device):
        encoded, _ = fn.readers.file(file_root=file_root)
        # end=10000 on axis 1 exceeds every test image's width
        decoded = fn.decoders.image_slice(encoded, device=device, end=[10000], axes=[1])
        return decoded
    p = pipe(device)
    p.build()
    assert_raises(RuntimeError, p.run,
                  glob='cropping window*..*..*is not valid for image dimensions*[*x*]')
def test_image_decoder_slice_error_oob():
    """Nose generator: out-of-bounds slice on both devices."""
    for device in ['cpu', 'mixed']:
        yield _testimpl_image_decoder_slice_error_oob, device
def test_pinned_input_hw_decoder():
    """Mixed decoder must accept input that became pinned due to a .gpu() transfer."""
    file_root = os.path.join(test_data_root, good_path, "jpeg")
    @pipeline_def(batch_size=128, device_id=0, num_threads=4)
    def pipe():
        encoded, _ = fn.readers.file(file_root=file_root)
        encoded_gpu = encoded.gpu()
        # encoded.gpu() should make encoded pinned
        decoded = fn.decoders.image(encoded, device="mixed")
        return decoded, encoded_gpu
    p = pipe()
    p.build()
    p.run()
def test_tiff_palette():
    """A palette TIFF must decode to (roughly) the same image and the same
    peeked shape as its plain-encoded counterpart."""
    normal = os.path.join(test_data_root, good_path, "tiff", "0/cat-300572_640.tiff")
    palette = os.path.join(test_data_root, good_path, "tiff", "0/cat-300572_640_palette.tiff")
    @pipeline_def(batch_size=2, device_id=0, num_threads=1)
    def pipe():
        encoded, _ = fn.readers.file(files=[normal, palette])
        peeked_shapes = fn.peek_image_shape(encoded)
        decoded = fn.decoders.image(encoded, device='cpu')
        return decoded, peeked_shapes
    p = pipe()
    p.build()
    imgs, peeked_shapes = p.run()
    assert (peeked_shapes.at(0) == peeked_shapes.at(1)).all(), \
        "Invalid peeked shape of palette TIFF"
    # normalized per-pixel difference; the 90th percentile must stay small
    delta = np.abs(imgs.at(0).astype('float') - imgs.at(1).astype('float'))/256
    assert np.quantile(delta, 0.9) < 0.05, "Original and palette TIFF differ significantly"
|
DALI-main
|
dali/test/python/decoder/test_image.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali.fn as fn
from nvidia.dali import pipeline_def
import numpy as np
import cv2
import nvidia.dali.types as types
import glob
from itertools import cycle
from test_utils import get_dali_extra_path, is_mulit_gpu
from nvidia.dali.backend import TensorListGPU
from nose2.tools import params
from nose import SkipTest
from nose.plugins.attrib import attr
# Collect the test videos once, as concrete lists. Using `filter(...)` here
# would produce one-shot iterators: building `files` below would exhaust
# `filenames`, leaving nothing for `ref_iter` to iterate later, which silently
# turned test_video_decoder into a comparison over zero sequences.
filenames = glob.glob(f'{get_dali_extra_path()}/db/video/[cv]fr/*.mp4')
# filter out HEVC because some GPUs do not support it
filenames = [filename for filename in filenames if 'hevc' not in filename]
# mpeg4 is not yet supported in the CPU operator itself
filenames = [filename for filename in filenames if 'mpeg4' not in filename]
files = [np.fromfile(filename, dtype=np.uint8) for filename in filenames]
@pipeline_def(device_id=0)
def video_decoder_pipeline(source, device='cpu'):
    """Decode whole encoded video buffers fed through an external source."""
    data = fn.external_source(source=source, dtype=types.UINT8, ndim=1)
    return fn.experimental.decoders.video(data, device=device)
def video_length(filename):
    """Return the number of frames in the video file, as reported by OpenCV."""
    capture = cv2.VideoCapture(filename)
    frame_count = capture.get(cv2.CAP_PROP_FRAME_COUNT)
    return int(frame_count)
@pipeline_def(batch_size=1, num_threads=1, device_id=0)
def reference_pipeline(filename, device='cpu'):
    """Reference reader pipeline that loads the whole video as one sequence."""
    seq_length = video_length(filename)
    return fn.experimental.readers.video(filenames=[filename], sequence_length=seq_length,
                                         device=device)
def video_loader(batch_size, epochs):
    """Yield batches of encoded video buffers, cycling through the module-level
    `files` list for `epochs` passes (the last batch may overrun slightly)."""
    total = epochs * len(files)
    sample_idx = 0
    while sample_idx < total:
        yield [files[(sample_idx + offset) % len(files)] for offset in range(batch_size)]
        sample_idx += batch_size
def video_decoder_iter(batch_size, epochs=1, device='cpu'):
    """Yield decoded video sequences (as numpy arrays), sample by sample."""
    pipe = video_decoder_pipeline(batch_size=batch_size, device_id=0, num_threads=4,
                                  source=video_loader(batch_size, epochs), device=device)
    pipe.build()
    # ceil(total_samples / batch_size) iterations cover all samples
    for _ in range(int((epochs * len(files) + batch_size - 1) / batch_size)):
        output, = pipe.run()
        if isinstance(output, TensorListGPU):
            output = output.as_cpu()
        for i in range(batch_size):
            yield np.array(output[i])
def ref_iter(epochs=1, device='cpu'):
    """Yield reference sequences, one per file, using the reader-based pipeline.

    NOTE(review): correctness here requires the module-level `filenames` to be a
    re-iterable list, not a one-shot iterator -- otherwise this loop yields nothing.
    """
    for _ in range(epochs):
        for filename in filenames:
            pipe = reference_pipeline(filename, device=device)
            pipe.build()
            output, = pipe.run()
            if isinstance(output, TensorListGPU):
                output = output.as_cpu()
            yield np.array(output[0])
@params('cpu', 'mixed')
def test_video_decoder(device):
    """Decoded sequences must match the reference reader frame-for-frame."""
    batch_size = 3
    epochs = 3
    decoder_iter = video_decoder_iter(batch_size, epochs, device)
    ref_dec_iter = ref_iter(epochs, device='cpu' if device == 'cpu' else 'gpu')
    for seq, ref_seq in zip(decoder_iter, ref_dec_iter):
        assert seq.shape == ref_seq.shape
        assert np.array_equal(seq, ref_seq)
def test_full_range_video():
    """First frame of a full-dynamic-range video must match the reference PNG."""
    @pipeline_def
    def test_pipeline():
        videos = fn.readers.video(
            device="gpu",
            filenames=[get_dali_extra_path() + '/db/video/full_dynamic_range/video.mp4'],
            sequence_length=1,
            initial_fill=10,
            normalized=False,
            dtype=types.UINT8)
        return videos
    video_pipeline = test_pipeline(batch_size=1, num_threads=1, device_id=0)
    video_pipeline.build()
    o = video_pipeline.run()
    out = o[0].as_cpu().as_array()
    ref = cv2.imread(get_dali_extra_path() + '/db/video/full_dynamic_range/0001.png')
    ref = cv2.cvtColor(ref, cv2.COLOR_BGR2RGB)
    left = ref
    right = out
    # cast to int so the difference does not wrap around in uint8
    absdiff = np.abs(left.astype(int) - right.astype(int))
    assert np.mean(absdiff) < 2
@params('cpu', 'gpu')
def test_full_range_video_in_memory(device):
    """Same full-dynamic-range check via the experimental in-memory reader."""
    @pipeline_def
    def test_pipeline():
        videos = fn.experimental.readers.video(
            device=device,
            filenames=[get_dali_extra_path() + '/db/video/full_dynamic_range/video.mp4'],
            sequence_length=1)
        return videos
    video_pipeline = test_pipeline(batch_size=1, num_threads=1, device_id=0)
    video_pipeline.build()
    o = video_pipeline.run()
    out = o[0]
    if device == "gpu":
        out = out.as_cpu()
    out = out.as_array()
    ref = cv2.imread(get_dali_extra_path() + '/db/video/full_dynamic_range/0001.png')
    ref = cv2.cvtColor(ref, cv2.COLOR_BGR2RGB)
    left = ref
    right = out
    # cast to int so the difference does not wrap around in uint8
    absdiff = np.abs(left.astype(int) - right.astype(int))
    assert np.mean(absdiff) < 2
@attr('multi_gpu')
@params('cpu', 'mixed')
def test_multi_gpu_video(device):
    """The experimental video decoder must also work on a non-default GPU."""
    if not is_mulit_gpu():
        raise SkipTest()
    batch_size = 1
    def input_gen(batch_size):
        # cycle over the test videos endlessly
        filenames = glob.glob(f'{get_dali_extra_path()}/db/video/[cv]fr/*.mp4')
        # test overflow of frame_buffer_
        filenames.append(f'{get_dali_extra_path()}/db/video/cfr_test.mp4')
        filenames = filter(lambda filename: 'mpeg4' not in filename, filenames)
        filenames = filter(lambda filename: 'hevc' not in filename, filenames)
        filenames = cycle(filenames)
        while True:
            batch = []
            for _ in range(batch_size):
                batch.append(np.fromfile(next(filenames), dtype=np.uint8))
            yield batch
    @pipeline_def
    def test_pipeline():
        vid = fn.external_source(device='cpu', source=input_gen(batch_size))
        seq = fn.experimental.decoders.video(vid, device=device)
        return seq
    # one pipeline per GPU
    video_pipeline_0 = test_pipeline(batch_size=1, num_threads=1, device_id=0)
    video_pipeline_1 = test_pipeline(batch_size=1, num_threads=1, device_id=1)
    video_pipeline_0.build()
    video_pipeline_1.build()
    iters = 5
    for _ in range(iters):
        video_pipeline_0.run()
        video_pipeline_1.run()
@params('cpu', 'gpu')
def test_source_info(device):
    """Each output sequence must carry the source filename it was read from."""
    filenames = glob.glob(f'{get_dali_extra_path()}/db/video/[cv]fr/*.mp4')
    # filter out HEVC because some GPUs do not support it
    filenames = filter(lambda filename: 'hevc' not in filename, filenames)
    # mpeg4 is not yet supported in the CPU operator itself
    filenames = filter(lambda filename: 'mpeg4' not in filename, filenames)
    files = list(filenames)
    @pipeline_def
    def test_pipeline():
        videos = fn.experimental.readers.video(
            device=device,
            filenames=files,
            sequence_length=1,
            step=10000000,  # make sure that each video has only one valid sequence
        )
        return videos
    batch_size = 4
    p = test_pipeline(batch_size=batch_size, num_threads=1, device_id=0)
    p.build()
    samples_read = 0
    while samples_read < len(files):
        o = p.run()
        for idx, t in enumerate(o[0]):
            assert t.source_info() == files[(samples_read + idx) % len(files)]
        samples_read += batch_size
|
DALI-main
|
dali/test/python/decoder/test_video.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline, DataNode
from nvidia.dali import pipeline_def, fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali.math as math
from nvidia.dali.tensors import TensorListGPU
import numpy as np
from nose.tools import assert_equals
from nose.plugins.attrib import attr
from nose2.tools import params
import itertools
from test_utils import np_type_to_dali
from nose_utils import raises, assert_raises
def list_product(*args):
    """Cartesian product of the argument iterables, materialized as a list of tuples."""
    return [combo for combo in itertools.product(*args)]
# Some test in this file are marked as `slow`. They cover all possible type and input kind
# combinations. The rest of the test cover only subset of selected cases to allow
# running time reduction.
batch_size = 4
# Shape of the samples; currently forces each sample to be covered by more than 1 tile.
def shape_big(arg_idx):
    """Return a batch of large 2D shapes (one per sample); `arg_idx` is ignored."""
    return [(1024, 1024) for _ in range(batch_size)]
# For the coverage of all type combinations we use a smaller batch.
def shape_small(arg_idx):
    """Return a fixed batch of four small 2D shapes; `arg_idx` is ignored."""
    return [(42, 3), (4, 16), (8, 2), (1, 64)]
# A number used to test constant inputs
magic_number = 7
# Placement "kinds" of operands: plain batches, 0D scalar batches, legacy 1-element
# scalars -- each on CPU or GPU -- and (for n-ary ops) DALI constants.
unary_input_kinds = [
    "cpu", "gpu", "cpu_scalar", "gpu_scalar", "cpu_scalar_legacy", "gpu_scalar_legacy"
]
all_input_kinds = [
    "cpu", "gpu", "cpu_scalar", "gpu_scalar", "cpu_scalar_legacy", "gpu_scalar_legacy", "const"
]
# We cannot have 'Constant x Constant' operations with DALI op.
# And scalar is still a represented by a Tensor, so 'Scalar x Constant' is the same
# as 'Tensor x Constant'.
bin_input_kinds = (list_product(["cpu", "gpu"], all_input_kinds)
                   + list_product(["cpu_scalar", "gpu_scalar", "const"], ["cpu", "gpu"]))
ternary_input_kinds = list_product(all_input_kinds, all_input_kinds, all_input_kinds)
ternary_input_kinds.remove(("const", "const", "const"))
integer_types = [
    np.bool_, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64
]
# float16 is marked as TODO in backend for gpu
float_types = [np.float32, np.float64]
input_types = integer_types + float_types
# Reduced type/kind subsets used by the non-exhaustive (fast) tests.
selected_input_types = [np.bool_, np.int32, np.uint8, np.float32]
selected_input_arithm_types = [np.int32, np.uint8, np.float32]
selected_bin_input_kinds = [("cpu", "cpu"), ("gpu", "gpu"), ("cpu", "cpu_scalar"),
                            ("gpu", "gpu_scalar"), ("const", "cpu"), ("const", "gpu")]
selected_ternary_input_kinds = [("cpu", "cpu", "cpu"), ("gpu", "gpu", "gpu"),
                                ("cpu", "const", "const"), ("gpu", "const", "const"),
                                ("gpu", "cpu", "cpu_scalar"),
                                ("cpu_scalar", "cpu_scalar", "cpu_scalar")]
bench_ternary_input_kinds = [("cpu", "cpu", "cpu"), ("gpu", "gpu", "gpu"),
                             ("cpu", "const", "const"), ("gpu", "const", "const"),
                             ("cpu", "cpu", "const"), ("gpu", "gpu", "const")]
# Unary arithmetic ops: (python lambda, display name).
unary_operations = [((lambda x: +x), "+"), ((lambda x: -x), "-")]
def sane_pow(x, y):
    """Reference power: like np.power, but integer bases raised to negative
    exponents yield 0 instead of raising an error."""
    integral_args = np.issubdtype(x.dtype, np.integer) and np.issubdtype(y.dtype, np.integer)
    if not integral_args:
        return np.power(x, y)
    # evaluate the power only where it is well-defined, zero out the rest
    valid = y >= 0
    return np.where(valid, np.power(x, y, where=valid), 0)
# For math functions we use limited ranges to avoid too many NaNs or exceptions in the test.
def pos_range(*types):
    """Strictly positive value range per type (for sqrt/log-like functions)."""
    def range_for(t):
        if np.issubdtype(t, np.integer):
            return (1, 20)
        return (0.5, 20.0)
    return [range_for(t) for t in types]
# The range that is supposed to be [-1, 1], but we extend it a bit.
def one_range(*types):
    """Roughly the [-1, 1] domain (e.g. for asin/acos), slightly extended."""
    def range_for(t):
        if np.issubdtype(t, np.integer):
            return (-2, 2)
        return (-1.5, 1.5)
    return [range_for(t) for t in types]
def limited_range(*types):
    # Limit the range so we do not end up comparing just infinities in the results.
    return [(-30, 30)] * len(types)
def pow_range(*_):
    """Fixed (base, exponent) ranges used by the power operators; args ignored."""
    return [(-15, 15), (-4, 4)]
def default_range(*types):
    """No range restriction for any of the given types."""
    return [None] * len(types)
# Unary math functions: (dali lambda, numpy reference lambda, name, range factory, epsilon).
math_function_operations = [
    ((lambda x: math.sqrt(x)), (lambda x: np.sqrt(x)), "sqrt", pos_range, 1e-6),
    ((lambda x: math.rsqrt(x)), (lambda x: 1.0 / np.sqrt(x)), "rsqrt", pos_range, 1e-5),
    ((lambda x: math.cbrt(x)), (lambda x: np.cbrt(x)), "cbrt", default_range, 1e-6),
    ((lambda x: math.exp(x)), (lambda x: np.exp(x)), "exp", limited_range, 1e-6),
    ((lambda x: math.log(x)), (lambda x: np.log(x)), "log", pos_range, 1e-6),
    ((lambda x: math.log2(x)), (lambda x: np.log2(x)), "log2", pos_range, 1e-6),
    ((lambda x: math.log10(x)), (lambda x: np.log10(x)), "log10", pos_range, 1e-6),
    ((lambda x: math.fabs(x)), (lambda x: np.fabs(x)), "fabs", default_range, 1e-6),
    ((lambda x: math.floor(x)), (lambda x: np.floor(x)), "floor", default_range, 1e-6),
    ((lambda x: math.ceil(x)), (lambda x: np.ceil(x)), "ceil", default_range, 1e-6),
    ((lambda x: math.sin(x)), (lambda x: np.sin(x)), "sin", default_range, 1e-6),
    ((lambda x: math.cos(x)), (lambda x: np.cos(x)), "cos", default_range, 1e-6),
    ((lambda x: math.tan(x)), (lambda x: np.tan(x)), "tan", default_range, 1e-6),
    ((lambda x: math.asin(x)), (lambda x: np.arcsin(x)), "asin", one_range, 1e-6),
    ((lambda x: math.acos(x)), (lambda x: np.arccos(x)), "acos", one_range, 1e-6),
    ((lambda x: math.atan(x)), (lambda x: np.arctan(x)), "atan", default_range, 1e-6),
    ((lambda x: math.sinh(x)), (lambda x: np.sinh(x)), "sinh", default_range, 1e-6),
    ((lambda x: math.cosh(x)), (lambda x: np.cosh(x)), "cosh", default_range, 1e-6),
    ((lambda x: math.tanh(x)), (lambda x: np.tanh(x)), "tanh", default_range, 1e-6),
    ((lambda x: math.asinh(x)), (lambda x: np.arcsinh(x)), "asinh", limited_range, 1e-6),
    ((lambda x: math.acosh(x)), (lambda x: np.arccosh(x)), "acosh", pos_range, 1e-6),
    ((lambda x: math.atanh(x)), (lambda x: np.arctanh(x)), "atanh", one_range, 1e-6)
]
# Binary ops that keep the promoted input type: (lambda(s), name, range factory).
sane_operations = [
    ((lambda x, y: x + y), "+", default_range), ((lambda x, y: x - y), "-", default_range),
    ((lambda x, y: x * y), "*", default_range), (((lambda x, y: x**y), sane_pow), "**", pow_range),
    (((lambda x, y: math.pow(x, y)), sane_pow), "pow", pow_range),
    (((lambda x, y: math.min(x, y)), (lambda x, y: np.minimum(x, y))), "min", default_range),
    (((lambda x, y: math.max(x, y)), (lambda x, y: np.maximum(x, y))), "max", default_range)
]
# Binary ops whose result is always floating point.
floaty_operations = [(((lambda x, y: x / y), (lambda x, y: x / y)), "/", default_range),
                     (((lambda x, y: math.fpow(x, y)), sane_pow), "fpow", pow_range),
                     (((lambda x, y: math.atan2(x, y)), (lambda x, y: np.arctan2(x, y))), "atan2",
                      default_range)]
# Integer-only bitwise ops.
bitwise_operations = [((lambda x, y: x & y), "&"), ((lambda x, y: x | y), "|"),
                      ((lambda x, y: x ^ y), "^")]
# Comparisons always produce booleans.
comparisons_operations = [
    ((lambda x, y: x == y), "=="),
    ((lambda x, y: x != y), "!="),
    ((lambda x, y: x < y), "<"),
    ((lambda x, y: x <= y), "<="),
    ((lambda x, y: x > y), ">"),
    ((lambda x, y: x >= y), ">="),
]
# The observable behaviour for hi < lo is the same as numpy
ternary_operations = [(((lambda v, lo, hi: math.clamp(v, lo, hi)),
                       (lambda v, lo, hi: np.clip(v, lo, hi))), "clamp")]
def as_cpu(tl):
    """Copy a GPU TensorList to host memory; pass host data through unchanged."""
    return tl.as_cpu() if isinstance(tl, TensorListGPU) else tl
def max_dtype(kind, left_dtype, right_dtype):
    """Dtype of the given `kind` with the wider of the two operands' itemsizes."""
    widest = max(left_dtype.itemsize, right_dtype.itemsize)
    return np.dtype(f"{kind}{widest}")
def float_bin_promote(left_dtype, right_dtype):
    """Promotion rule when at least one operand is floating point."""
    left_is_float = 'f' in left_dtype.kind
    right_is_float = 'f' in right_dtype.kind
    if left_is_float and not right_is_float:
        return left_dtype
    if right_is_float and not left_is_float:
        return right_dtype
    # both operands are floats -> take the wider one
    return max_dtype('f', left_dtype, right_dtype)
def signed_unsigned_bin_promote(signed_type, unsigned_type):
    """Promotion of a signed integer with an unsigned (or bool) operand."""
    # Treat the booleans as smaller than anything
    if unsigned_type.kind == 'b':
        return signed_type
    if signed_type.itemsize > unsigned_type.itemsize:
        # the signed type can already represent every unsigned value
        return np.dtype(f"i{signed_type.itemsize}")
    # otherwise widen to the next signed size that fits, capped at 64 bits
    return np.dtype(f"i{min(unsigned_type.itemsize * 2, 8)}")
def bin_promote_dtype(left_dtype, right_dtype):
    """DALI type-promotion rule for a binary op over two numpy dtypes."""
    if left_dtype == right_dtype:
        return left_dtype
    kinds = {left_dtype.kind, right_dtype.kind}
    if 'f' in kinds:
        return float_bin_promote(left_dtype, right_dtype)
    if kinds == {'b'}:
        return np.dtype(np.bool_)
    if kinds == {'i'}:
        return max_dtype('i', left_dtype, right_dtype)
    # bool op unsigned (the bool-bool case is handled above)
    if kinds <= set('bu'):
        return max_dtype('u', left_dtype, right_dtype)
    # exactly one of the operands is signed
    if left_dtype.kind == 'i':
        return signed_unsigned_bin_promote(left_dtype, right_dtype)
    return signed_unsigned_bin_promote(right_dtype, left_dtype)
def hack_builtin_types(input_type):
    """Map Python builtin scalar values to fixed-width numpy types.

    Uses an exact type() lookup on purpose: bool must NOT be treated as int.
    """
    replacements = {int: np.int32, float: np.float32}
    return replacements.get(type(input_type), input_type)
def bin_promote(left_type, right_type):
    """Promoted result type (as a numpy scalar type) of a binary arithmetic op."""
    promoted = bin_promote_dtype(np.dtype(hack_builtin_types(left_type)),
                                 np.dtype(hack_builtin_types(right_type)))
    return promoted.type
def div_promote(left_type, right_type):
    # For __truediv__ integer results are promoted to float; otherwise like a regular bin op.
    left_dtype = np.dtype(hack_builtin_types(left_type))
    right_dtype = np.dtype(hack_builtin_types(right_type))
    neither_is_float = 'f' not in left_dtype.kind and 'f' not in right_dtype.kind
    if neither_is_float:
        return np.float32
    return float_bin_promote(left_dtype, right_dtype).type
def int_generator(shape, type, no_zeros, limited_range):
    """Random integer array of `shape`; with no_zeros, zeros are bumped to 1."""
    info = np.iinfo(type)
    if limited_range is None:
        low, high = info.min / 2, info.max / 2
    else:
        # clip the requested range to what the type can represent
        low = max(info.min, limited_range[0])
        high = min(info.max, limited_range[1])
    result = np.random.randint(low, high, shape, type)
    if no_zeros:
        # shift exact zeros to 1 so the values are safe as divisors
        return result + (result == 0)
    return result
def bool_generator(shape, no_zeros):
    """Random boolean array; with no_zeros, every element is forced to True."""
    result = np.random.choice(a=[True, False], size=shape)
    if no_zeros:
        # x | (x == False) is True everywhere
        return result | (result == False)  # noqa:E712 intended comparison with False
    return result
def float_generator(shape, type, _, limited_range):
    """Random float data in [low, high); only int and 2D-tuple shapes are
    generated in full, any other shape yields a single-element array."""
    low, high = (0., 1.) if limited_range is None else limited_range
    span = high - low
    if isinstance(shape, int):
        return type(low + np.random.rand(shape) * span)
    if len(shape) == 2:
        return type(low + np.random.rand(*shape) * span)
    # fallback: a single random scalar wrapped in a 1-element array
    return type([low + np.random.rand() * span])
class ExternalInputIterator(object):
    """
    Generates inputs of required shapes and types.
    The number of inputs is based on the length of tuple `types`; if `types` is
    a single element, one output is generated.
    If the kind contains 'scalar', then the result is a batch of scalar tensors.
    The "shape" of the `kinds` argument should match the `types` argument --
    single elements or tuples of the same arity.
    """
    def __init__(self, batch_size, shape_gen, types, kinds,
                 disallow_zeros=None, limited_range=None):
        # Normalize scalar `types`/`kinds` to 1-tuples so the rest can iterate uniformly.
        try:
            self.length = len(types)
        except TypeError:
            types = (types, )
            kinds = (kinds, )
            self.length = 1
        if not disallow_zeros:
            disallow_zeros = (False, ) * self.length
        if limited_range is None:
            limited_range = (None, ) * self.length
        self.batch_size = batch_size
        self.types = types
        self.gens = []    # per-input generator callables
        self.shapes = []  # per-input list of sample shapes ([] marks 0D scalars)
        for i in range(self.length):
            self.gens += [self.get_generator(self.types[i], disallow_zeros[i], limited_range[i])]
            if "scalar" not in kinds[i]:
                self.shapes += [shape_gen(i)]
            elif "scalar_legacy" in kinds[i]:
                # legacy scalars are 1-element 1D tensors
                self.shapes += [[(1, )] * batch_size]
            else:
                self.shapes += [[]]  # empty shape, special 0D scalar
    def __iter__(self):
        return self
    def __next__(self):
        # Returns a tuple with one batch (list or array) per declared input.
        out = ()
        for i in range(self.length):
            batch = []
            # Handle 0D scalars
            if self.shapes[i] == []:
                batch = self.gens[i](self.batch_size)
            else:
                for sample in range(self.batch_size):
                    batch.append(self.gens[i](self.shapes[i][sample]))
            out = out + (batch, )
        return out
    def get_generator(self, type, no_zeros, limited_range):
        # Pick the generator matching the requested element type.
        if type == np.bool_:
            return lambda shape: bool_generator(shape, no_zeros)
        elif type in [np.float16, np.float32, np.float64]:
            return lambda shape: float_generator(shape, type, no_zeros, limited_range)
        else:
            return lambda shape: int_generator(shape, type, no_zeros, limited_range)
    next = __next__  # Python 2 style alias
class ExprOpPipeline(Pipeline):
    """Pipeline that feeds external inputs and returns them together with `op`'s result."""
    def __init__(self, kinds, types, iterator, op, batch_size, num_threads, device_id):
        super(ExprOpPipeline, self).__init__(batch_size, num_threads, device_id, seed=12)
        # Normalize scalar `types`/`kinds` arguments to 1-element tuples.
        try:
            self.length = len(types)
        except TypeError:
            types = (types, )
            kinds = (kinds, )
            self.length = 1
        self.external_source = []
        for i in range(self.length):
            self.external_source.append(ops.ExternalSource())
        self.kinds = kinds
        self.types = types
        self.iterator = iterator
        self.op = op
    def define_graph(self):
        # Outputs: all raw inputs first, then the operator's result.
        self.source = []
        inputs = []
        for i in range(self.length):
            self.source.append(self.external_source[i]())
            inputs.append(self.get_operand(self.source[i], self.kinds[i], self.types[i]))
        return tuple(self.source) + (self.op(*inputs), )
    def get_operand(self, operand, kind, operand_type):
        # Wrap the external-source node according to the requested placement/kind.
        if kind == "const":
            return types.Constant(magic_number, np_type_to_dali(operand_type))
        elif "cpu" in kind:
            return operand
        elif "gpu" in kind:
            return operand.gpu()
    def iter_setup(self):
        # Feed the next batch from the iterator into every external source.
        inputs = self.iterator.next()
        for i in range(len(inputs)):
            self.feed_input(self.source[i], inputs[i])
def get_numpy_input(input, kind, orig_type, target_type):
    """Convert a DALI input sample to its numpy reference representation.

    :param input: sample data as a numpy array; ignored for the "const" kind
    :param kind: placement/kind string ("cpu", "gpu", "const", may contain "scalar")
    :param orig_type: the original type of used input
    :param target_type: the type of the result after type promotions
    """
    if kind == "const":
        # Constants are first materialized in the original type, then promoted.
        return target_type(orig_type(magic_number))
    converted = input.astype(target_type)
    if "scalar" in kind:
        # Keep the exact original shape for scalar batches.
        converted = converted.reshape(input.shape)
    return converted
def extract_un_data(pipe_out, sample_id, kind, target_type):
    """Extract the (input, output) numpy pair for a unary-op pipeline at `sample_id`.

    Asserts the output dtype is `target_type`; the input is converted to the numpy
    reference representation via get_numpy_input.
    """
    input = as_cpu(pipe_out[0]).at(sample_id)
    out = as_cpu(pipe_out[1]).at(sample_id)
    assert_equals(out.dtype, target_type)
    in_np = get_numpy_input(input, kind, input.dtype.type, target_type)
    return in_np, out
def extract_data(pipe_out, sample_id, kinds, target_type):
    """
    Extract output for given sample_id from the pipeline.

    Expand the data based on the kinds parameter and optionally cast it into target type
    as numpy does type promotions a bit differently.
    Returns a tuple (input_0, ..., input_{arity-1}, output).
    """
    arity = len(kinds)
    inputs = []
    for i in range(arity):
        dali_in = as_cpu(pipe_out[i]).at(sample_id)
        # When no target_type is given, keep the input's own dtype.
        numpy_in = get_numpy_input(dali_in, kinds[i], dali_in.dtype.type,
                                   target_type if target_type is not None else dali_in.dtype.type)
        inputs.append(numpy_in)
    out = as_cpu(pipe_out[arity]).at(sample_id)
    return tuple(inputs) + (out, )
def check_unary_op(kind, type, op, shape, _):
    """Check a unary arithmetic `op` against its direct numpy evaluation."""
    # Regular arithmetic ops that can be validated as straight numpy
    iterator = iter(ExternalInputIterator(batch_size, shape, type, kind))
    pipe = ExprOpPipeline(kind, type, iterator, op, batch_size=batch_size, num_threads=2,
                          device_id=0)
    pipe.build()
    pipe_out = pipe.run()
    for sample in range(batch_size):
        in_np, out = extract_un_data(pipe_out, sample, kind, type)
        if 'f' in np.dtype(type).kind:
            # Floating point: tolerate rounding error (fp16 gets a looser tolerance).
            np.testing.assert_allclose(out, op(in_np), rtol=1e-07 if type != np.float16 else 0.005)
        else:
            np.testing.assert_array_equal(out, op(in_np))
def test_unary_arithmetic_ops():
    """Yield unary arithmetic test cases for all non-boolean input types."""
    for kinds, (op, op_desc), types_in in itertools.product(
            unary_input_kinds, unary_operations, input_types):
        # Unary arithmetic is not defined for booleans.
        if types_in == np.bool_:
            continue
        yield check_unary_op, kinds, types_in, op, shape_small, op_desc
def test_unary_arithmetic_ops_big():
    """Unary arithmetic ops on the big shape; int8 only to keep the runtime short."""
    for kinds, (op, op_desc) in itertools.product(unary_input_kinds, unary_operations):
        yield check_unary_op, kinds, np.int8, op, shape_big, op_desc
def check_math_function_op(kind, type, op, np_op, shape, get_range, op_desc, eps):
    """Run math-function `op` in a DALI pipeline and compare with the numpy `np_op`.

    Integral inputs are computed in float32; floating-point inputs keep their type.
    """
    value_range = get_range(type)
    src = iter(ExternalInputIterator(batch_size, shape, type, kind,
                                     limited_range=value_range))
    pipe = ExprOpPipeline(kind, type, src, op, batch_size=batch_size, num_threads=2,
                          device_id=0)
    pipe.build()
    results = pipe.run()
    # Math functions promote integral inputs to float32.
    if type in [np.float16, np.float32, np.float64]:
        out_type = type
    else:
        out_type = np.float32
    # fp16 needs a much looser tolerance than the caller-provided eps.
    tolerance = 0.005 if type == np.float16 else eps
    for idx in range(batch_size):
        in_np, out = extract_un_data(results, idx, kind, out_type)
        np.testing.assert_allclose(out, np_op(in_np.astype(out_type)), rtol=tolerance)
def test_math_function_ops():
    """Math functions (sqrt, sin, ...) against their numpy references, non-bool types."""
    for kinds, op_tuple, types_in in itertools.product(
            unary_input_kinds, math_function_operations, input_types):
        if types_in == np.bool_:
            continue
        op, np_op, op_desc, get_range, eps = op_tuple
        yield (check_math_function_op, kinds, types_in, op, np_op, shape_small,
               get_range, op_desc, eps)
def test_math_function_ops_big():
    """Math functions on the big shape; int8 only to keep the runtime short."""
    for kinds, op_tuple in itertools.product(unary_input_kinds, math_function_operations):
        op, np_op, op_desc, get_range, eps = op_tuple
        yield (check_math_function_op, kinds, np.int8, op, np_op, shape_big,
               get_range, op_desc, eps)
def check_arithm_op(kinds, types, op, shape, get_range, op_desc):
    """Check a binary arithmetic `op` against numpy with DALI's type promotion."""
    # Regular arithmetic ops that can be validated as straight numpy
    if isinstance(op, tuple):
        dali_op, numpy_op = op
    else:
        dali_op = numpy_op = op
    left_type, right_type = types
    # Expected output type follows DALI's binary type-promotion rules.
    target_type = bin_promote(left_type, right_type)
    iterator = iter(
        ExternalInputIterator(batch_size, shape, types, kinds,
                              limited_range=get_range(left_type, right_type)))
    pipe = ExprOpPipeline(kinds, types, iterator, dali_op, batch_size=batch_size, num_threads=2,
                          device_id=0)
    pipe.build()
    pipe_out = pipe.run()
    for sample in range(batch_size):
        l_np, r_np, out = extract_data(pipe_out, sample, kinds, target_type)
        assert_equals(out.dtype, target_type)
        if 'f' in np.dtype(target_type).kind:
            np.testing.assert_allclose(out, numpy_op(l_np, r_np),
                                       rtol=1e-06 if target_type != np.float16 else 0.005)
        else:
            np.testing.assert_array_equal(out, numpy_op(l_np, r_np))
def check_ternary_op(kinds, types, op, shape, _):
    """Check a ternary `op` against numpy; all three input types are promoted."""
    # Regular arithmetic ops that can be validated as straight numpy
    if isinstance(op, tuple):
        dali_op, numpy_op = op
    else:
        dali_op = numpy_op = op
    # Ternary promotion is two pairwise binary promotions.
    target_type = bin_promote(bin_promote(types[0], types[1]), types[2])
    iterator = iter(ExternalInputIterator(batch_size, shape, types, kinds))
    pipe = ExprOpPipeline(kinds, types, iterator, dali_op, batch_size=batch_size, num_threads=2,
                          device_id=0)
    pipe.build()
    pipe_out = pipe.run()
    for sample in range(batch_size):
        x, y, z, out = extract_data(pipe_out, sample, kinds, target_type)
        assert_equals(out.dtype, target_type)
        if 'f' in np.dtype(target_type).kind:
            np.testing.assert_allclose(out, numpy_op(x, y, z),
                                       rtol=1e-07 if target_type != np.float16 else 0.005)
        else:
            np.testing.assert_array_equal(out, numpy_op(x, y, z))
def test_arithmetic_ops_big():
    """Binary arithmetic ops on the big shape; int8 x int8 only to limit the runtime."""
    for kinds, (op, op_desc, get_range) in itertools.product(bin_input_kinds, sane_operations):
        yield check_arithm_op, kinds, (np.int8, np.int8), op, shape_big, get_range, op_desc
def test_arithmetic_ops_selected():
    """Binary arithmetic ops over a reduced kind/type matrix."""
    for kinds in selected_bin_input_kinds:
        for (op, op_desc, get_range) in sane_operations:
            for types_in in itertools.product(selected_input_types, repeat=2):
                # bool * bool is the only binary arithmetic allowed on booleans.
                if types_in == (np.bool_, np.bool_) and op_desc != "*":
                    continue
                yield check_arithm_op, kinds, types_in, op, shape_small, get_range, op_desc
@attr('slow')
def slow_test_arithmetic_ops():
    """Binary arithmetic ops over the full kind/type matrix (slow)."""
    for kinds in bin_input_kinds:
        for (op, op_desc, get_range) in sane_operations:
            for types_in in itertools.product(input_types, repeat=2):
                # bool * bool is the only binary arithmetic allowed on booleans.
                if types_in == (np.bool_, np.bool_) and op_desc != "*":
                    continue
                yield check_arithm_op, kinds, types_in, op, shape_small, get_range, op_desc
def test_ternary_ops_big():
    """Ternary ops on the big shape with a handful of representative type triples."""
    type_triples = [
        (np.int32, np.int32, np.int32),
        (np.int32, np.int8, np.int16),
        (np.int32, np.uint8, np.float32),
    ]
    for kinds in selected_ternary_input_kinds:
        for (op, op_desc) in ternary_operations:
            for types_in in type_triples:
                yield check_ternary_op, kinds, types_in, op, shape_big, op_desc
def test_ternary_ops_selected():
    """Ternary ops over all triples of the selected arithmetic types."""
    for kinds in selected_ternary_input_kinds:
        for (op, op_desc) in ternary_operations:
            for types_in in itertools.product(selected_input_arithm_types, repeat=3):
                yield check_ternary_op, kinds, types_in, op, shape_small, op_desc
# Only selected types, otherwise it takes too long
@attr('slow')
def slow_test_ternary_ops_kinds():
    """Ternary ops over all kind combinations, with a few representative type triples."""
    type_triples = [
        (np.int32, np.int32, np.int32),
        (np.float32, np.int32, np.int32),
        (np.uint8, np.float32, np.float32),
        (np.int32, np.float32, np.float32),
    ]
    for kinds in ternary_input_kinds:
        for (op, op_desc) in ternary_operations:
            for types_in in type_triples:
                yield check_ternary_op, kinds, types_in, op, shape_small, op_desc
@attr('slow')
def slow_test_ternary_ops_types():
    """Ternary ops over the full type product (slow); the all-bool triple is skipped."""
    all_bool = (np.bool_, np.bool_, np.bool_)
    for kinds in selected_ternary_input_kinds:
        for (op, op_desc) in ternary_operations:
            for types_in in list_product(input_types, input_types, input_types):
                if types_in != all_bool:
                    yield check_ternary_op, kinds, types_in, op, shape_small, op_desc
def test_bitwise_ops_selected():
    """Bitwise ops over the reduced type matrix; only integral operand pairs apply."""
    for kinds in selected_bin_input_kinds:
        for (op, op_desc) in bitwise_operations:
            for types_in in itertools.product(selected_input_types, repeat=2):
                if all(t in integer_types for t in types_in):
                    yield check_arithm_op, kinds, types_in, op, shape_small, default_range, op_desc
@attr('slow')
def slow_test_bitwise_ops():
    """Bitwise ops over the full type matrix (slow); only integral operand pairs apply."""
    for kinds in bin_input_kinds:
        for (op, op_desc) in bitwise_operations:
            for types_in in itertools.product(input_types, repeat=2):
                if all(t in integer_types for t in types_in):
                    yield check_arithm_op, kinds, types_in, op, shape_small, default_range, op_desc
def check_comparsion_op(kinds, types, op, shape, _):
    """Check a comparison `op`; the result must always be boolean."""
    # NOTE(review): "comparsion" is a typo for "comparison", but the name is
    # referenced by the test generators below, so it is kept for compatibility.
    # Comparisons - should always return bool
    iterator = iter(ExternalInputIterator(batch_size, shape, types, kinds))
    pipe = ExprOpPipeline(kinds, types, iterator, op, batch_size=batch_size, num_threads=2,
                          device_id=0)
    pipe.build()
    pipe_out = pipe.run()
    for sample in range(batch_size):
        # target_type=None keeps the inputs' own dtypes for the reference computation.
        l_np, r_np, out = extract_data(pipe_out, sample, kinds, None)
        assert_equals(out.dtype, np.bool_)
        np.testing.assert_array_equal(out, op(l_np, r_np), err_msg=f"{l_np} op\n{r_np} =\n{out}")
def test_comparison_ops_selected():
    """Comparison ops over the reduced type matrix."""
    for kinds in selected_bin_input_kinds:
        for (op, op_desc) in comparisons_operations:
            for types_in in itertools.product(selected_input_types, repeat=2):
                yield check_comparsion_op, kinds, types_in, op, shape_small, op_desc
@attr('slow')
def slow_test_comparison_ops():
    """Comparison ops over the full type matrix (slow)."""
    for kinds in bin_input_kinds:
        for (op, op_desc) in comparisons_operations:
            for types_in in itertools.product(input_types, repeat=2):
                yield check_comparsion_op, kinds, types_in, op, shape_small, op_desc
# The div operator that always returns floating point values
def check_arithm_binary_float(kinds, types, op, shape, get_range, _):
    """Check binary ops whose result is always floating-point (e.g. true division)."""
    if isinstance(op, tuple):
        dali_op, numpy_op = op
    else:
        dali_op = numpy_op = op
    left_type, right_type = types
    # Promotion for these operators always yields a floating-point type.
    target_type = div_promote(left_type, right_type)
    # (False, True): never generate zeros for the right operand (the divisor).
    iterator = iter(
        ExternalInputIterator(batch_size, shape, types, kinds, (False, True),
                              limited_range=get_range(left_type, right_type)))
    pipe = ExprOpPipeline(kinds, types, iterator, dali_op, batch_size=batch_size, num_threads=2,
                          device_id=0)
    pipe.build()
    pipe_out = pipe.run()
    for sample in range(batch_size):
        l_np, r_np, out = extract_data(pipe_out, sample, kinds, target_type)
        assert_equals(out.dtype, target_type)
        np.testing.assert_allclose(out, numpy_op(l_np, r_np),
                                   rtol=1e-06 if target_type != np.float16 else 0.005,
                                   err_msg=f"{l_np} op\n{r_np} =\n{out}")
def test_arithmetic_binary_float_big():
    """Float-result binary ops on the big shape; int8 x int8 only."""
    for kinds in bin_input_kinds:
        for (op, op_desc, get_range) in floaty_operations:
            yield (check_arithm_binary_float, kinds, (np.int8, np.int8), op, shape_big,
                   get_range, op_desc)
def test_arithmetic_binary_float_selected():
    """Float-result binary ops over the reduced type matrix (bool pairs excluded)."""
    for kinds in selected_bin_input_kinds:
        for types_in in itertools.product(selected_input_types, repeat=2):
            for (op, op_desc, get_range) in floaty_operations:
                if types_in == (np.bool_, np.bool_):
                    continue
                yield (check_arithm_binary_float, kinds, types_in, op, shape_small, get_range,
                       op_desc)
@attr('slow')
def slow_test_arithmetic_binary_float():
    """Float-result binary ops over the full type matrix (slow; bool pairs excluded)."""
    for kinds in bin_input_kinds:
        for types_in in itertools.product(input_types, repeat=2):
            for (op, op_desc, get_range) in floaty_operations:
                if types_in == (np.bool_, np.bool_):
                    continue
                yield (check_arithm_binary_float, kinds, types_in, op, shape_small, get_range,
                       op_desc)
def check_arithm_div(kinds, types, shape):
    """Check `//`, which for integral types behaves like C/C++ division."""
    # The div operator behaves like C/C++ one
    left_type, right_type = types
    target_type = bin_promote(left_type, right_type)
    # (False, True): the divisor must never be zero.
    iterator = iter(ExternalInputIterator(batch_size, shape, types, kinds, (False, True)))
    pipe = ExprOpPipeline(kinds, types, iterator, (lambda x, y: x // y), batch_size=batch_size,
                          num_threads=2, device_id=0)
    pipe.build()
    pipe_out = pipe.run()
    for sample in range(batch_size):
        l_np, r_np, out = extract_data(pipe_out, sample, kinds, target_type)
        assert_equals(out.dtype, target_type)
        if 'f' in np.dtype(target_type).kind:
            np.testing.assert_allclose(out, l_np / r_np,
                                       rtol=1e-07 if target_type != np.float16 else 0.005)
        else:
            # Approximate validation, as np does something different than C:
            # numpy floors toward -inf, C truncates toward zero. Emulate C by
            # dividing magnitudes and restoring the sign afterwards.
            result = np.floor_divide(np.abs(l_np), np.abs(r_np))
            neg = ((l_np < 0) & (r_np > 0)) | ((l_np > 0) & (r_np < 0))
            pos = ~neg
            result = result * (pos * 1 - neg * 1)
            np.testing.assert_array_equal(out, result)
def test_arithmetic_division_big():
    """C-style division on the big shape; int8 x int8 only."""
    for kinds in bin_input_kinds:
        yield check_arithm_div, kinds, (np.int8, np.int8), shape_big
def test_arithmetic_division_selected():
    """C-style division over the reduced type matrix (bool pairs excluded)."""
    both_bool = (np.bool_, np.bool_)
    for kinds in selected_bin_input_kinds:
        for types_in in itertools.product(selected_input_types, repeat=2):
            if types_in != both_bool:
                yield check_arithm_div, kinds, types_in, shape_small
@attr('slow')
def slow_test_arithmetic_division():
    """C-style division over the full type matrix (slow; bool pairs excluded)."""
    both_bool = (np.bool_, np.bool_)
    for kinds in bin_input_kinds:
        for types_in in itertools.product(input_types, repeat=2):
            if types_in != both_bool:
                yield check_arithm_div, kinds, types_in, shape_small
def check_raises(kinds, types, op, shape):
    """Build and run a pipeline applying `op`; any error propagates to the caller."""
    dali_op = op[0] if isinstance(op, tuple) else op
    src = iter(ExternalInputIterator(batch_size, shape, types, kinds))
    pipe = ExprOpPipeline(kinds, types, src, dali_op, batch_size=batch_size, num_threads=2,
                          device_id=0)
    pipe.build()
    pipe.run()
def check_raises_re(kinds, types, op, shape, _, msg):
    # Expect the pipeline built over `op` to fail with a RuntimeError matching `msg`.
    with assert_raises(RuntimeError, regex=msg):
        check_raises(kinds, types, op, shape)
@raises(TypeError, glob=("\"DataNode\" was used in conditional context - it might have been used "
                         "in truth evaluation for `if` statement, logical expression or cast to a "
                         "boolean. To use conditional execution via `if` statements you need to "
                         "specify `enable_conditionals=True` in `@nvidia.dali.pipeline_def` "
                         "decorator. You can read more about conditional execution in specific "
                         "section of the Pipeline documentation. Bool conversion can be achieved "
                         "with the `cast` operator."))
def check_raises_te(kinds, types, op, shape, _):
    # Expect a TypeError from using a DataNode in a boolean context (e.g. min/max).
    check_raises(kinds, types, op, shape)
# Arithmetic operations between booleans that are not allowed.
# (lambda, display name) pairs; each op must raise when both inputs are boolean.
# NOTE: the "//" entry previously used true division (x / y), so floor division
# was never actually exercised - fixed to x // y.
bool_disallowed = [((lambda x, y: x + y), "+"), ((lambda x, y: x - y), "-"),
                   ((lambda x, y: x / y), "/"), ((lambda x, y: x // y), "//"),
                   ((lambda x, y: x**y), "**")]
def test_bool_disallowed():
    """Boolean inputs must be rejected by non-bitwise arithmetic operators."""
    error_msg = ("Input[s]? to arithmetic operator `[\\S]*` cannot be [a]?[ ]?boolean[s]?."
                 " Consider using bitwise operator[s]?")
    # Unary math functions on a boolean input.
    for kinds in unary_input_kinds:
        for math_op in math_function_operations:
            op, op_desc = math_op[0], math_op[2]
            yield check_raises_re, kinds, np.bool_, op, shape_small, op_desc, error_msg
    # Binary arithmetic on a pair of booleans.
    for kinds, (op, op_desc) in itertools.product(bin_input_kinds, bool_disallowed):
        yield check_raises_re, kinds, (np.bool_, np.bool_), op, shape_small, op_desc, error_msg
    # Ternary ops on a triple of booleans.
    for kinds, (op, op_desc) in itertools.product(selected_ternary_input_kinds,
                                                  ternary_operations):
        yield (check_raises_re, kinds, (np.bool_, np.bool_, np.bool_), op, shape_small, op_desc,
               error_msg)
def test_bitwise_disallowed():
    """Bitwise operators must reject floating-point operands."""
    error_msg = "Inputs to bitwise operator `[\\S]*` must be of integral type."
    for kinds in bin_input_kinds:
        for (op, op_desc) in bitwise_operations:
            for types_in in itertools.product(selected_input_types, repeat=2):
                # Only the cases with at least one float operand should be rejected.
                if any(t in float_types for t in types_in):
                    yield check_raises_re, kinds, types_in, op, shape_small, op_desc, error_msg
def test_prohibit_min_max():
    """Python builtin min/max cannot operate on DataNodes and must raise TypeError."""
    builtins_under_test = [(min, "min"), (max, "max")]
    for kinds in bin_input_kinds:
        for op, op_desc in builtins_under_test:
            yield check_raises_te, kinds, (np.int32, np.int32), op, shape_small, op_desc
@raises(TypeError, glob=("\"DataNode\" was used in conditional context - it might have been used "
                         "in truth evaluation for `if` statement, logical expression or cast to a "
                         "boolean. To use conditional execution via `if` statements you need to "
                         "specify `enable_conditionals=True` in `@nvidia.dali.pipeline_def` "
                         "decorator. You can read more about conditional execution in specific "
                         "section of the Pipeline documentation. Bool conversion can be achieved "
                         "with the `cast` operator."))
def test_bool_raises():
    # Directly casting a DataNode to bool must raise the conditional-context TypeError.
    bool(DataNode("dummy"))
def test_binary_ops_broadcasting():
    """Binary arithmetic ops with per-sample shapes that require broadcasting."""
    def get_sh(arg_idx):
        # Per-sample shapes; each shapes1 entry broadcasts against the matching shapes0 entry.
        shapes0 = [(43, 42, 3), (4, 3, 16), (8, 1, 2), (1, 2, 64)]
        shapes1 = [(1, 1, 3), (1, 1, 1), (1, 8, 2), (1, 2, 64)]
        if arg_idx == 0:
            return shapes0
        elif arg_idx == 1:
            return shapes1
        else:
            assert False
    for kinds in list_product(["cpu", "gpu"], ["cpu", "gpu"]):
        for (op, op_desc, get_range) in sane_operations:
            for types_in in itertools.product(selected_input_types, selected_input_types):
                # bool * bool is the only binary arithmetic allowed on booleans.
                if types_in != (np.bool_, np.bool_) or op_desc == "*":
                    yield check_arithm_op, kinds, types_in, op, get_sh, get_range, op_desc
def test_ternary_ops_broadcasting():
    """Ternary ops with per-sample shapes that require broadcasting."""
    def get_sh(arg_idx):
        # Per-sample shapes; entries at the same index broadcast against each other.
        shapes0 = [(43, 42, 3), (4, 3, 16), (8, 1, 2), (1, 2, 64)]
        shapes1 = [(1, 1, 3), (1, 1, 1), (1, 8, 2), (1, 2, 64)]
        shapes2 = [(43, 1, 3), (4, 1, 16), (8, 1, 2), (1, 1, 1)]
        if arg_idx == 0:
            return shapes0
        elif arg_idx == 1:
            return shapes1
        elif arg_idx == 2:
            return shapes2
        else:
            assert False
    for kinds in ("cpu", "cpu", "cpu"), ("gpu", "gpu", "gpu"):
        for (op, op_desc) in ternary_operations:
            for types_in in itertools.product(selected_input_arithm_types,
                                              selected_input_arithm_types,
                                              selected_input_arithm_types):
                yield check_ternary_op, kinds, types_in, op, get_sh, op_desc
def generate_layout_broadcasting_cases():
    """Generate (op_name, args_desc, out_desc, input_devs, in_types, op) cases for
    checking how layouts propagate through broadcast binary/ternary expressions."""
    rng = np.random.default_rng(4242)
    def get_input_dev(num_inputs):
        # Either all-cpu, all-gpu, or a mix that is guaranteed to be non-uniform.
        placement = rng.choice(["cpu", "gpu", "non_uniform"])
        if placement != "non_uniform":
            return (placement, ) * num_inputs
        placement = [rng.choice(["cpu", "gpu"]) for _ in range(num_inputs - 1)]
        placement.append("gpu" if placement[-1] == "cpu" else "cpu")
        return tuple(placement)
    def get_input_types(num_inputs, integral_only):
        # Bitwise ops only accept integral inputs.
        types = (np.int32, np.uint8)
        if not integral_only:
            types += (np.float32, )
        return tuple(rng.choice(types, size=(num_inputs, )))
    # The input layouts and the expected output layout.
    # A number N denotes an ND tensor without a layout.
    # `Exception` means that applying an operator with the arguments with
    # given layouts should raise an error.
    bin_layouts = [
        ((4, "C"), 4),
        (("C", 3), 3),
        (("C", 2), 2),
        (("C", 1), "C"),
        ((1, "C"), "C"),
        ((0, "C"), "C"),
        (("C", 0), "C"),
        (("ABCD", 0), "ABCD"),
        ((0, "ABCD"), "ABCD"),
        (("ABCD", 3), "ABCD"),
        ((1, "ABCD"), "ABCD"),
        (("ABCD", "D"), "ABCD"),
        (("D", "ABCD"), "ABCD"),
        (("ABCD", "CD"), "ABCD"),
        (("ABCD", "BCD"), "ABCD"),
        (("BCD", "ABCD"), "ABCD"),
        (("ABCD", "ABCD"), "ABCD"),
        (("ABCD", "ABC"), Exception()),
        (("X", "ABCD"), Exception()),
    ]
    ternary_layouts = [
        (("ABCD", "CD", "D"), "ABCD"),
        (("ABCD", "D", "CD"), "ABCD"),
        ((3, "ABCD", "CD"), "ABCD"),
        ((0, "ABCD", 0), "ABCD"),
        ((0, "BCD", 4), 4),
        ((3, 4, "CD"), 4),
        ((4, "ABCD", 4), "ABCD"),
        ((4, "A", "B"), Exception()),
    ]
    bin_ops = floaty_operations[:5] + bitwise_operations[:3] + \
        comparisons_operations[:2] + sane_operations
    def tensor_desc(ndim_or_layout):
        # Turn an "ndim or layout string" spec into a concrete (shape, layout) pair.
        if isinstance(ndim_or_layout, int):
            ndim = ndim_or_layout
            layout = None
        else:
            assert isinstance(ndim_or_layout, str)
            ndim = len(ndim_or_layout)
            layout = ndim_or_layout
        max_shape = (5, 7, 11, 13)
        shape = tuple() if ndim == 0 else max_shape[-ndim:]
        return shape, layout
    for num_inputs, layouts, op_lists in [
        (2, bin_layouts, bin_ops),
        (3, ternary_layouts, ternary_operations),
    ]:
        for i, (args_desc, out_desc) in enumerate(layouts):
            assert (len(args_desc) == num_inputs)
            # Cycle through the operators so each layout case uses some op.
            op, op_name = op_lists[i % len(op_lists)][:2]
            op = op if not isinstance(op, tuple) else op[0]
            input_devs = get_input_dev(num_inputs)
            in_types = get_input_types(num_inputs, op_name in ("&|^"))
            args_desc = tuple(tensor_desc(arg) for arg in args_desc)
            if not isinstance(out_desc, Exception):
                out_desc = tensor_desc(out_desc)
            yield op_name, args_desc, out_desc, input_devs, in_types, op
@params(*tuple(generate_layout_broadcasting_cases()))
def test_layout_broadcasting(op_name, args_desc, out_desc, in_devs, in_types, op):
    """Check that the output layout and shape of a broadcast expression match `out_desc`,
    or that an error is raised when `out_desc` is an Exception."""
    assert len(args_desc) == len(in_devs)
    assert len(in_types) == len(in_devs)
    batch_size = 4
    @pipeline_def(batch_size=batch_size, device_id=0, num_threads=4)
    def pipeline():
        # Constant-filled inputs with the requested shapes, layouts, devices and types.
        in_nodes = [
            types.Constant(np.full(shape, 1, dtype=in_type), device=in_dev, layout=layout)
            for (shape, layout), in_dev, in_type in zip(args_desc, in_devs, in_types)
        ]
        return op(*in_nodes)
    p = pipeline()
    p.build()
    if isinstance(out_desc, Exception):
        with assert_raises(Exception, glob="They must be equal or one must be a suffix"):
            p.run()
    else:
        o, = p.run()
        expected_shape, expected_layout = out_desc
        # A None layout is reported as an empty string by DALI.
        expected_layout = expected_layout or ""
        assert o.layout() == expected_layout, f"got `{o.layout()}`, expected `{expected_layout}`"
        out_shape = o.shape()
        assert len(out_shape) == batch_size, f"got `{len(out_shape)}`, expected `{batch_size}`"
        for sample_shape in out_shape:
            assert sample_shape == expected_shape, \
                f"got `{sample_shape}`, expected `{expected_shape}`"
def test_broadcasting_dimensionality_limits():
    """Broadcasting is limited to 6 groups of collapsed dimensions - check both sides
    of the limit."""
    def impl(device, shape_a, shape_b):
        @pipeline_def(batch_size=1, num_threads=3, device_id=0)
        def pipe():
            a = fn.random.uniform(range=[-1, 1], shape=shape_a)
            b = fn.random.uniform(range=[-1, 1], shape=shape_b)
            return a + b
        p = pipe()
        p.build()
        p.run()
    # ERROR: alternating 2/1 dims cannot be collapsed - 10 groups exceed the limit.
    error_msg = \
        "Broadcasting pattern too complex. Can't operate with simplified" + \
        " shapes with more than 6 groups of dimensions. Got 10 groups. " + \
        "For more details see https://docs.nvidia.com/deeplearning/dali/user-guide/docs/math.html"
    shape_a_err = (2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    shape_b_err = (1, 2, 1, 2, 1, 2, 1, 2, 1, 2)
    for device in ['cpu', 'gpu']:
        with assert_raises(RuntimeError, glob=error_msg):
            impl(device, shape_a_err, shape_b_err)
    # NO ERROR (exactly 6 groups)
    shape_a_ok = (2, 1, 1, 1, 3, 1, 4, 5, 6, 1)
    shape_b_ok = (1, 2, 3, 4, 1, 5, 1, 1, 1, 6)
    for device in ['cpu', 'gpu']:
        impl(device, shape_a_ok, shape_b_ok)
def test_broadcasting_incompatible_shapes():
    """Incompatible per-axis extents must raise with a diagnostic naming both shapes."""
    def impl(device, shape_a, shape_b):
        @pipeline_def(batch_size=1, num_threads=3, device_id=0)
        def pipe():
            a = fn.random.uniform(range=[-1, 1], shape=shape_a)
            b = fn.random.uniform(range=[-1, 1], shape=shape_b)
            return a + b
        p = pipe()
        p.build()
        p.run()
    # Mismatch in the innermost extent (4 vs 3).
    error_msg1 = "Can't broadcast shapes:*" + \
                 "2 x 3 x 4 (d=2, belonging to sample_idx=0)\n" + \
                 "2 x 3 x 3 (d=2, belonging to sample_idx=0)"
    shape_a1 = (2, 3, 4)
    shape_b1 = (2, 3, 3)
    for device in ['cpu', 'gpu']:
        with assert_raises(RuntimeError, glob=error_msg1):
            impl(device, shape_a1, shape_b1)
    # Mismatch between a 2D and a 1D operand.
    error_msg2 = "Can't broadcast shapes:*" + \
                 "1 x 4 (d=1, belonging to sample_idx=0)\n" + \
                 "3 (d=0, belonging to sample_idx=0)"
    shape_a2 = (1, 4)
    shape_b2 = (3)  # NOTE(review): (3) is the int 3, not a 1-tuple - presumably
    # intended, since DALI accepts a scalar shape; confirm against fn.random.uniform.
    for device in ['cpu', 'gpu']:
        with assert_raises(RuntimeError, glob=error_msg2):
            impl(device, shape_a2, shape_b2)
|
DALI-main
|
dali/test/python/operator_1/test_arithmetic_ops.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.io.wavfile
from nvidia.dali import fn, pipeline_def, types
from test_audio_decoder_utils import generate_waveforms
from test_utils import check_batch, dali_type_to_np, as_array
# Paths of the generated test .wav files (1, 2 and 4 channels respectively).
names = [
    "/tmp/dali_test_1C.wav",
    "/tmp/dali_test_2C.wav",
    "/tmp/dali_test_4C.wav"
]
# Per-file sine frequencies, one entry per channel (as a fraction of the sample rate).
freqs = [
    np.array([0.02]),
    np.array([0.01, 0.012]),
    np.array([0.01, 0.012, 0.013, 0.014])
]
# Sampling rates (Hz) and lengths (samples) for each generated file.
rates = [16000, 22050, 12347]
lengths = [10000, 54321, 12345]
def create_files():
    """Generate the test waveforms and write them out as 16-bit PCM .wav files."""
    for i in range(len(names)):
        wave = generate_waveforms(lengths[i], freqs[i])
        # Scale float [-1, 1] samples to the int16 range.
        wave = (wave * 32767).round().astype(np.int16)
        scipy.io.wavfile.write(names[i], rates[i], wave)
# Files are created once at import time; the tests below read them.
create_files()
@pipeline_def
def audio_decoder_pipe(device):
    """Decode audio and resample it to 15 kHz in four ways: with the decoder's fused
    resampling and with three standalone audio_resample variants (by rates, by scale,
    by output length)."""
    encoded, _ = fn.readers.file(files=names)
    audio0, sr0 = fn.decoders.audio(encoded, dtype=types.FLOAT)
    out_sr = 15000
    # Fused: the decoder resamples directly to out_sr.
    audio1, sr1 = fn.decoders.audio(encoded, dtype=types.FLOAT, sample_rate=out_sr)
    if device == 'gpu':
        audio0 = audio0.gpu()
    # Standalone resampling, specified three equivalent ways.
    audio2 = fn.audio_resample(audio0, in_rate=sr0, out_rate=out_sr)
    audio3 = fn.audio_resample(audio0, scale=out_sr / sr0)
    audio4 = fn.audio_resample(audio0, out_length=fn.shapes(audio1)[0])
    return audio1, audio2, audio3, audio4
def _test_standalone_vs_fused(device):
    """Compare standalone audio_resample outputs against the fused decoder resampling."""
    pipe = audio_decoder_pipe(device=device, batch_size=2, num_threads=1, device_id=0)
    pipe.build()
    is_gpu = device == 'gpu'
    for _ in range(2):
        outs = pipe.run()
        # two sampling rates - should be bit-exact (on CPU; GPU gets a tiny tolerance)
        check_batch(outs[0], outs[1], eps=1e-6 if is_gpu else 0,
                    max_allowed_error=1e-4 if is_gpu else 0)
        # numerical round-off error in rate
        check_batch(outs[0], outs[2], eps=1e-6, max_allowed_error=1e-4)
        # here, the sampling rate is slightly different, so we can tolerate larger errors
        check_batch(outs[0], outs[3], eps=1e-4, max_allowed_error=1)
def test_standalone_vs_fused():
    """Run the standalone-vs-fused comparison on both backends."""
    yield _test_standalone_vs_fused, 'gpu'
    yield _test_standalone_vs_fused, 'cpu'
def _test_type_conversion(device, src_type, in_values, dst_type, out_values, eps):
    """Check that audio_resample converts src_type input to dst_type output, mapping
    each constant-filled buffer of in_values[i] onto out_values[i].

    :param eps: maximum absolute error tolerated in the comparison
    """
    src_nptype = dali_type_to_np(src_type)
    dst_nptype = dali_type_to_np(dst_type)
    assert len(out_values) == len(in_values)
    # Buffers of different lengths per sample, to exercise varying shapes.
    in_data = [np.full((100 + 10 * i,), x, src_nptype) for i, x in enumerate(in_values)]
    @pipeline_def(batch_size=len(in_values))
    def test_pipe(device):
        input = fn.external_source(in_data, batch=False, cycle='quiet', device=device)
        # scale=1 resampling is an identity except for the dtype conversion.
        return fn.audio_resample(input, dtype=dst_type, scale=1, quality=0)
    pipe = test_pipe(device, device_id=0, num_threads=4)
    pipe.build()
    for _ in range(2):
        out, = pipe.run()
        assert len(out) == len(out_values)
        assert out.dtype == dst_type
        for i in range(len(out_values)):
            ref = np.full_like(in_data[i], out_values[i], dst_nptype)
            out_arr = as_array(out[i])
            if not np.allclose(out_arr, ref, 1e-6, eps):
                print("Actual: ", out_arr)
                print(out_arr.dtype, out_arr.shape)
                print("Reference: ", ref)
                print(ref.dtype, ref.shape)
                # np.float was removed in NumPy 1.24 - use the builtin float instead,
                # otherwise this diagnostic path raises AttributeError.
                print("Diff: ", out_arr.astype(float) - ref)
            assert np.allclose(out_arr, ref, 1e-6, eps)
def test_dynamic_ranges():
    """Check value preservation at the extremes of each type's dynamic range."""
    cases = [
        (types.FLOAT, [-1.e30, -1 - 1.e-6, -1, -0.5, -1.e-30, 0, 1.e-30, 0.5, 1, 1 + 1.e-6, 1e30],
         0),
        (types.UINT8, [0, 1, 128, 254, 255], 0),
        (types.INT8, [-128, -127, -1, 0, 1, 127], 0),
        (types.UINT16, [0, 1, 32767, 32768, 65534, 65535], 0),
        (types.INT16, [-32768, -32767, -100, -1, 0, 1, 100, 32767], 0),
        # 32-bit integers lose precision when passing through float - allow eps of 128.
        (types.UINT32, [0, 1, 0x7fffffff, 0x80000000, 0xfffffffe, 0xffffffff], 128),
        (types.INT32, [-0x80000000, -0x7fffffff, -100, -1, 0, 1, 0x7fffffff], 128)
    ]
    for type, values, eps in cases:
        for device in ('cpu', 'gpu'):
            # Same type in and out: the values must round-trip within eps.
            yield _test_type_conversion, device, type, values, type, values, eps
def test_type_conversion():
    """Check conversions between all pairs of supported sample types; expected outputs
    are the inputs linearly rescaled from the source range to the destination range."""
    type_ranges = [(types.FLOAT, [-1, 1]),
                   (types.UINT8, [0, 255]),
                   (types.INT8, [-127, 127]),
                   (types.UINT16, [0, 65535]),
                   (types.INT16, [-32767, 32767]),
                   (types.INT32, [-0x7fffffff, 0x7fffffff]),
                   (types.UINT32, [0, 0xffffffff])]
    for src_type, src_range in type_ranges:
        i_lo, i_hi = src_range
        # Symmetric source ranges test {lo, 0, hi}; asymmetric ones test the two midpoints too.
        if i_lo == -i_hi:
            in_values = [i_lo, 0, i_hi]
        else:
            in_values = [i_lo, (i_lo + i_hi) // 2, (i_lo + i_hi + 1) // 2, i_hi]
        for dst_type, dst_range in type_ranges:
            o_lo, o_hi = dst_range
            if len(in_values) == 3:
                if o_lo != -o_hi:
                    out_values = [o_lo, (o_hi + o_lo + 1) / 2, o_hi]  # rounding
                else:
                    out_values = [o_lo, 0, o_hi]
            else:
                # Linear rescale of the midpoints from source range to destination range.
                out_values = [o_lo, o_lo + (o_hi - o_lo) * in_values[1] / (i_hi - i_lo),
                              o_lo + (o_hi - o_lo) * in_values[2] / (i_hi - i_lo), o_hi]
            if dst_type != types.FLOAT:
                out_values = list(map(int, out_values))
            # Both ranges contribute round-off proportional to their width / 2^24.
            eps = (o_hi - o_lo) / 2 ** 24 + (i_hi - i_lo) / 2 ** 24
            print(src_type, in_values, dst_type, out_values)
            # the result will be halfway - add epsilon of 1
            if eps < 1 and (o_lo != -o_hi or (i_hi != i_lo and dst_type != types.FLOAT)):
                eps = 1
            for device in ('cpu', 'gpu'):
                yield _test_type_conversion, device, src_type, in_values, dst_type, out_values, eps
|
DALI-main
|
dali/test/python/operator_1/test_audio_resample.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline, pipeline_def
import nvidia.dali.types as types
import nvidia.dali.fn as fn
import numpy as np
import cv2
from scipy.ndimage import convolve1d
import os
from nose_utils import assert_raises, raises
from nose.plugins.attrib import attr
from sequences_test_utils import video_suite_helper, ArgCb
from test_utils import get_dali_extra_path, check_batch, compare_pipelines, \
RandomlyShapedDataIterator, dali_type
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
# Number of pipeline iterations per check.
test_iters = 4
# (shape, layout, number of convolved axes) cases; channel ('C') and frame ('F')
# dimensions are excluded from the blur.
shape_layout_axes_cases = [((20, 20, 30, 3), "DHWC", 3), ((20, 20, 30), "", 3),
                           ((20, 30, 3), "HWC", 2), ((20, 30), "HW", 2),
                           ((3, 30, 20), "CWH", 2), ((5, 20, 30, 3), "FHWC", 2),
                           ((5, 10, 10, 7, 3), "FDHWC", 3), ((5, 3, 20, 30), "FCHW", 2),
                           ((3, 5, 10, 10, 7), "CFDHW", 3)]
def to_batch(tl, batch_size):
    """Convert the first `batch_size` samples of an indexable TensorList to numpy arrays."""
    samples = []
    for idx in range(batch_size):
        samples.append(np.array(tl[idx]))
    return samples
def to_cv_sigma(sigma, axes=2):
    """Normalize a DALI sigma argument into an OpenCV-style per-axis tuple.

    OpenCV orders axes the other way round (width first), hence the final reversal.
    """
    if sigma is None:
        return (0,) * axes
    if isinstance(sigma, (int, float)):
        return (sigma,) * axes
    if isinstance(sigma, np.ndarray) and sigma.ndim == 0:
        # 0D numpy array - treat it as a uniform scalar sigma.
        return (float(sigma),) * axes
    if len(sigma) == 1:
        return (sigma[0],) * axes
    return tuple(reversed(sigma))
def to_cv_win_size(window_size, axes=2, sigma=None):
    """Normalize a DALI window_size argument into an OpenCV-style per-axis tuple."""
    if window_size is None:
        if sigma is None:
            return (0,) * axes
        # cv2.getGaussianKernel needs a concrete size, so derive one from sigma
        # the same way DALI does: radius = int(3 * sigma + 0.5).
        sigma_axes = to_cv_sigma(sigma, axes)
        return tuple(int(3 * s + 0.5) * 2 + 1 for s in sigma_axes)
    if isinstance(window_size, int):
        return (int(window_size),) * axes
    if isinstance(window_size, np.ndarray) and window_size.ndim == 0:
        # 0D numpy array - a uniform scalar window size.
        return (int(window_size),) * axes
    if len(window_size) == 1:
        return (int(window_size[0]),) * axes
    # OpenCV shape is the other way round: (width, height)
    return tuple(int(x) for x in reversed(window_size))
def gaussian_cv(image, sigma, window_size):
    """OpenCV reference implementation of a 2D gaussian blur with uint8 output."""
    sigma_x, sigma_y = to_cv_sigma(sigma)
    window_size_cv = to_cv_win_size(window_size)
    # compute on floats and round like a sane person (in mathematically complicit way)
    blurred = cv2.GaussianBlur(np.float32(image), window_size_cv, sigmaX=sigma_x, sigmaY=sigma_y)
    return np.uint8(blurred + 0.5)
def gaussian_baseline(image, sigma, window_size, axes=2, skip_axes=0, dtype=np.uint8):
    """N-dimensional gaussian blur baseline built from separable 1D convolutions.

    :param axes: number of axes to convolve over
    :param skip_axes: number of leading axes (e.g. frames/channels) left untouched
    :param dtype: output type; float32 skips the final rounding
    """
    sigma_xyz = to_cv_sigma(sigma, axes)
    win_xyz = to_cv_win_size(window_size, axes, sigma)
    filters = [cv2.getGaussianKernel(win_xyz[i], sigma_xyz[i]) for i in range(axes)]
    filters = [np.float32(f).squeeze() for f in filters]
    # to_cv_* return reversed (OpenCV) axis order - restore DALI order.
    filters.reverse()
    for i in reversed(range(axes)):
        axis = i + skip_axes
        # Degenerate extent-1 axes can't use mirror padding.
        if image.shape[axis] == 1:
            mode = "nearest"
        else:
            mode = "mirror"
        image = convolve1d(np.float32(image), filters[i], axis, mode=mode)
    if dtype == np.float32:
        return image
    else:
        # Round half-up when converting back to an integral type.
        return dtype(image + 0.5)
def get_gaussian_pipe(batch_size, sigma, window_size, op_type):
    """Build a pipeline decoding images and blurring them on ``op_type`` backend.

    Outputs (blurred, decoded) pairs. ``images_dir`` is a module-level constant
    defined elsewhere in this file.
    """
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    with pipe:
        input, _ = fn.readers.file(file_root=images_dir, shard_id=0, num_shards=1)
        decoded = fn.decoders.image(input, device="cpu", output_type=types.RGB)
        if op_type == "gpu":
            # move data to the GPU so the operator runs on the requested backend
            decoded = decoded.gpu()
        blurred = fn.gaussian_blur(decoded, device=op_type, sigma=sigma, window_size=window_size)
        pipe.set_outputs(blurred, decoded)
    return pipe
def check_gaussian_blur(batch_size, sigma, window_size, op_type="cpu"):
    """Run the image blur pipeline and compare against the OpenCV reference.

    ``test_iters`` and ``check_batch`` are module-level helpers defined elsewhere
    in this file.
    """
    pipe = get_gaussian_pipe(batch_size, sigma, window_size, op_type)
    pipe.build()
    for _ in range(test_iters):
        result, input = pipe.run()
        if op_type == "gpu":
            # copy GPU outputs to host for comparison
            result = result.as_cpu()
            input = input.as_cpu()
        input = to_batch(input, batch_size)
        baseline_cv = [gaussian_cv(img, sigma, window_size) for img in input]
        # off-by-one is allowed to absorb rounding differences
        check_batch(result, baseline_cv, batch_size, max_allowed_error=1, expected_layout="HWC")
def test_image_gaussian_blur():
    """Yield image-blur checks over both backends and basic sigma/window combos."""
    for dev in ("cpu", "gpu"):
        sigma = 1.0
        for window_size in (3, 5, None):
            yield check_gaussian_blur, 10, sigma, window_size, dev
        # OpenCV uses fixed coefficients for small windows that differ from the
        # Gaussian function, so also check a larger sigma-derived window.
        yield check_gaussian_blur, 10, None, 11, dev
@attr('slow')
def slow_test_image_gaussian_blur():
    """Extended image-blur sweep including anisotropic sigmas and windows."""
    for dev in ("cpu", "gpu"):
        for sigma in (1.0, [1.0, 2.0]):
            for window_size in (3, 5, [7, 5], [5, 9], None):
                yield check_gaussian_blur, 10, sigma, window_size, dev
        # OpenCV uses fixed coefficients for small windows that differ from the
        # Gaussian function, so also check larger sigma-derived windows.
        for window_size in (15, [17, 31]):
            yield check_gaussian_blur, 10, None, window_size, dev
def check_gaussian_blur_cpu_gpu(batch_size, sigma, window_size):
    """Compare CPU and GPU gaussian_blur pipelines built with identical arguments."""
    pipes = [get_gaussian_pipe(batch_size, sigma, window_size, dev)
             for dev in ("cpu", "gpu")]
    compare_pipelines(pipes[0], pipes[1], batch_size, 16, max_allowed_error=1)
def test_gaussian_blur_cpu_gpu():
    """CPU vs GPU parity for sigma derived from the window size."""
    for win in (5, [7, 13]):
        yield check_gaussian_blur_cpu_gpu, 10, None, win
@attr('slow')
def slow_test_gaussian_blur_cpu_gpu():
    """Exhaustive CPU vs GPU parity sweep over sigma/window combinations."""
    for sigma in (1.0, [1.0, 2.0], None):
        for window_size in (3, 5, [7, 5], [5, 9], 11, 15, 31, None):
            if sigma is None and window_size is None:
                continue  # both unspecified is not a valid configuration
            yield check_gaussian_blur_cpu_gpu, 10, sigma, window_size
def count_skip_axes(layout):
    """Return how many leading frame/channel ('F'/'C') axes the layout carries."""
    if layout.startswith(("FC", "CF")):
        return 2
    if layout.startswith(("F", "C")):
        return 1
    return 0
def check_generic_gaussian_blur(
        batch_size, sigma, window_size, shape, layout, axes, op_type="cpu", in_dtype=np.uint8,
        out_dtype=types.NO_TYPE, random_shape=True):
    """Blur randomly shaped data and compare with the separable-convolution baseline.

    ``shape`` is the max (or exact, when ``random_shape`` is False) sample shape;
    ``axes`` is the number of spatial axes to filter, after skipping frame/channel
    dims derived from ``layout``. ``test_iters``, ``check_batch``, ``dali_type``
    and ``RandomlyShapedDataIterator`` are defined elsewhere in this file.
    """
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    min_shape = None if random_shape else shape
    data = RandomlyShapedDataIterator(
        batch_size, min_shape=min_shape, max_shape=shape, dtype=in_dtype)
    # Extract the numpy type from DALI, we can have float32 or the same as input
    if out_dtype == types.NO_TYPE:
        out_dtype = None
        result_type = in_dtype
    elif dali_type(in_dtype) == out_dtype:
        result_type = in_dtype
    else:
        result_type = np.float32
    with pipe:
        input = fn.external_source(data, layout=layout)
        if op_type == "gpu":
            input = input.gpu()
        blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma,
                                   window_size=window_size, dtype=out_dtype)
        pipe.set_outputs(blurred, input)
    pipe.build()
    for _ in range(test_iters):
        result, input = pipe.run()
        if op_type == "gpu":
            result = result.as_cpu()
            input = input.as_cpu()
        input = to_batch(input, batch_size)
        skip_axes = count_skip_axes(layout)
        baseline = [
            gaussian_baseline(img, sigma, window_size, axes, skip_axes, dtype=result_type)
            for img in input]
        # integer outputs tolerate off-by-one; float32 comparisons use a tight epsilon
        max_error = 1 if result_type != np.float32 else 1e-04
        check_batch(result, baseline, batch_size, max_allowed_error=max_error,
                    expected_layout=layout)
# Generate tests for single or per-axis sigma and window_size arguments
def generate_generic_cases(dev, t_in, t_out):
    """Yield generic blur checks over shapes, sigmas and window sizes."""
    for shape, layout, axes in shape_layout_axes_cases:
        for sigma in [1.0, [1.0, 2.0, 3.0]]:
            for window_size in [3, 5, [7, 5, 9], [3, 5, 9], None]:
                # trim per-axis specs to the number of filtered axes
                sig = sigma[0:axes] if isinstance(sigma, list) else sigma
                win = window_size[0:axes] if isinstance(window_size, list) else window_size
                yield check_generic_gaussian_blur, \
                    10, sig, win, shape, layout, axes, dev, t_in, t_out
        for window_size in [11, 15]:
            yield check_generic_gaussian_blur, \
                10, None, window_size, shape, layout, axes, dev, t_in, t_out
def test_generic_gaussian_blur():
    """Generic blur sweep over both backends and the basic dtype combinations."""
    dtype_combos = [(np.uint8, types.NO_TYPE),
                    (np.float32, types.FLOAT),
                    (np.uint8, types.FLOAT)]
    for dev in ("cpu", "gpu"):
        for t_in, t_out in dtype_combos:
            yield from generate_generic_cases(dev, t_in, t_out)
def test_one_sized_extent():
    """Blur tensors that have a degenerate (length-1) extent in some dimension."""
    cases = [((1, 10, 6), "DHW"),
             ((10, 1, 3), "HWC"),
             ((1, 10, 3), "HWC"),
             ((1, 10), "HW"),
             ((10, 1), "HW")]
    for dev in ("cpu", "gpu"):
        for shape, layout in cases:
            axes = len(layout) - ("C" in layout)
            yield (check_generic_gaussian_blur, 10, 2.0, 5, shape, layout, axes, dev,
                   np.float32, types.FLOAT, False)
@attr('slow')
def slow_test_generic_gaussian_blur():
    """Extended generic blur sweep over all supported input/output dtype pairs."""
    for dev in ("cpu", "gpu"):
        for t_in in (np.uint8, np.int32, np.float32):
            for t_out in (types.NO_TYPE, types.FLOAT, dali_type(t_in)):
                yield from generate_generic_cases(dev, t_in, t_out)
def check_per_sample_gaussian_blur(
        batch_size, sigma_dim, window_size_dim, shape, layout, axes, op_type="cpu"):
    """Check gaussian_blur with per-sample (argument-input) sigma / window_size.

    ``sigma_dim`` / ``window_size_dim`` give the shape of the per-sample tensor
    argument; ``None`` means the argument is not passed to the operator at all.
    """
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    data = RandomlyShapedDataIterator(batch_size, max_shape=shape)
    with pipe:
        if sigma_dim is not None:
            sigma = fn.random.uniform(range=[0.5, 3], shape=[sigma_dim])
            sigma_arg = sigma
        else:
            # placeholder, so we can return something
            sigma = fn.random.coin_flip(probability=0)
            sigma_arg = None
        if window_size_dim is not None:
            window_radius = fn.random.uniform(range=[5, 10], shape=[window_size_dim])
            # force odd window sizes
            window_size = fn.cast(window_radius, dtype=types.INT32) * 2 + 1
            window_arg = window_size
        else:
            window_size = fn.random.coin_flip(probability=0)
            window_arg = None
        input = fn.external_source(data, layout=layout)
        if op_type == "gpu":
            input = input.gpu()
        blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma_arg, window_size=window_arg)
        pipe.set_outputs(blurred, input, sigma, window_size)
    pipe.build()
    for _ in range(test_iters):
        result, input, sigma, window_size = pipe.run()
        if op_type == "gpu":
            result = result.as_cpu()
            input = input.as_cpu()
        input = to_batch(input, batch_size)
        sigma = to_batch(sigma, batch_size)
        window_size = to_batch(window_size, batch_size)
        baseline = []
        for i in range(batch_size):
            # Fix: test the *_dim sentinel (mirroring the window check below).
            # The previous `sigma is not None` was always true, since `sigma`
            # is a list after to_batch; effective values are unchanged because
            # the placeholder sample is 0 and to_cv_sigma(None) is also zeros.
            sigma_arg = sigma[i] if sigma_dim is not None else None
            window_arg = window_size[i] if window_size_dim is not None else None
            skip_axes = count_skip_axes(layout)
            baseline.append(gaussian_baseline(input[i], sigma_arg, window_arg, axes, skip_axes))
        check_batch(result, baseline, batch_size, max_allowed_error=1, expected_layout=layout)
# TODO(klecki): consider checking mixed ArgumentInput/Scalar value cases
def test_per_sample_gaussian_blur():
    """Per-sample sigma / window_size sweep over layouts and both backends."""
    for dev in ("cpu", "gpu"):
        for shape, layout, axes in shape_layout_axes_cases:
            for sigma_dim in (None, 1, axes):
                for window_size_dim in (None, 1, axes):
                    if sigma_dim is None and window_size_dim is None:
                        continue  # at least one per-sample argument is required
                    yield (check_per_sample_gaussian_blur,
                           10, sigma_dim, window_size_dim, shape, layout, axes, dev)
def check_fail_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type,
                             err_regex, in_dtype=np.uint8, out_dtype=types.NO_TYPE):
    """Expect check_generic_gaussian_blur to raise RuntimeError matching ``err_regex``."""
    with assert_raises(RuntimeError, regex=err_regex):
        check_generic_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type,
                                    in_dtype, out_dtype)
def test_fail_gaussian_blur():
    """Negative tests: invalid layouts and invalid sigma/window_size values."""
    for dev in ["cpu", "gpu"]:
        # Check layout and channel placement errors
        args = [
            ((20, 20, 30, 3), "DHCW", 3,
             r"Only channel-first or channel-last layouts are supported, got: .*\."),
            ((5, 20, 30, 3), "HFWC", 2,
             r"For sequences, layout should begin with 'F' or 'C', got: .*\."),
            ((5, 10, 10, 10, 7, 3), "FWXYZC", 4,
             r"Too many dimensions, found: \d+ data axes, maximum supported is: 3\."),
            ((5, 3, 20, 3, 30), "FCHCW", 2,
             r"Only channel-first or channel-last layouts are supported, got: .*\."),
            ((5, 3, 20, 3, 30), "FCCHW", 2,
             r"Found more the one occurrence of 'F' or 'C' axes in layout: .*\.")
        ]
        for shape, layout, axes, err_regex in args:
            yield check_fail_gaussian_blur, \
                10, 1.0, 11, shape, layout, axes, dev, err_regex
        # Negative, disallowed or both unspecified values of sigma and window size
        yield check_fail_gaussian_blur, \
            10, 0.0, 0, (100, 20, 3), "HWC", 3, dev, \
            r"`sigma` and `window_size` shouldn't be 0 at the same time for sample: \d+, " \
            r"axis: \d+\."
        yield check_fail_gaussian_blur, \
            10, -1.0, 0, (100, 20, 3), "HWC", 3, dev, \
            r"`sigma` must have non-negative values, got .\d* for sample: \d*, axis: \d*\."
        yield check_fail_gaussian_blur, \
            10, 0.0, -11, (100, 20, 3), "HWC", 3, dev, \
            r"`window_size` must have non-negative values, got .\d* for sample: \d*, axis : \d*\."
    # even windows are rejected with backend-specific messages
    yield check_fail_gaussian_blur, \
        10, 0.0, 2, (100, 20, 3), "HWC", 3, "cpu", \
        r"Kernel window should have odd length, got: \d*\."
    yield check_fail_gaussian_blur, \
        10, 0.0, 2, (100, 20, 3), "HWC", 3, "gpu", \
        r"Even or non-centered windows are not supported yet, got window with even length: " \
        r"[\s\S]* for sample \d*\."
def test_per_frame():
    """Check gaussian_blur with per-frame (fn.per_frame) arguments on video input."""
    def window_size(sample_desc):
        # random odd window size
        return np.array(2 * sample_desc.rng.randint(1, 15) + 1, dtype=np.int32)

    def per_axis_window_size(sample_desc):
        return np.array([window_size(sample_desc) for _ in range(2)])

    def sigma(sample_desc):
        # random sigma in [3, 6)
        return np.array((sample_desc.rng.random() + 1) * 3., dtype=np.float32)

    def per_axis_sigma(sample_desc):
        return np.array([sigma(sample_desc) for _ in range(2)])

    # (operator, fixed kwargs, per-sample argument callbacks); the bool in ArgCb
    # selects whether the argument is passed per frame
    video_test_cases = [
        (fn.gaussian_blur, {'window_size': 3}, []),
        (fn.gaussian_blur, {}, [ArgCb("window_size", window_size, True)]),
        (fn.gaussian_blur, {}, [ArgCb("window_size", per_axis_window_size, True)]),
        (fn.gaussian_blur, {}, [ArgCb("sigma", sigma, True)]),
        (fn.gaussian_blur, {}, [
            ArgCb("window_size", per_axis_window_size, True),
            ArgCb("sigma", per_axis_sigma, True)]),
        (fn.gaussian_blur, {'dtype': types.FLOAT}, [
            ArgCb("window_size", per_axis_window_size, False),
            ArgCb("sigma", per_axis_sigma, True)]),
    ]
    yield from video_suite_helper(video_test_cases, expand_channels=True)
# test if SequenceOperator properly errors out on per-frame argument when input is expanded only
# because of channel-first layout (but there are no frames on the input)
@raises(RuntimeError, "Tensor input for argument window_size is specified per frame "
                      "(got F layout). In that case, samples in the input 0 must contain "
                      "frames too. Got layout `CHW` that does not contain frames.")
def test_fail_per_frame_no_frames():
    """Per-frame argument on channel-only (no 'F' axis) input must raise."""
    @pipeline_def
    def pipeline():
        blob = fn.random.uniform(range=[0, 1], shape=(3, 200, 100))
        image = fn.reshape(blob, layout="CHW")
        per_channel = np.array([3, 5, 7])
        return fn.gaussian_blur(image, window_size=fn.per_frame(per_channel))
    pipe = pipeline(batch_size=8, num_threads=4, device_id=0)
    pipe.build()
    pipe.run()
|
DALI-main
|
dali/test/python/operator_1/test_gaussian_blur.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import nvidia.dali as dali
import nvidia.dali.fn as fn
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import test_utils
def test_unified_arg_placement():
    """An operator output can be passed as a named argument input (ops API)."""
    batch_size = 30
    pipe = Pipeline(batch_size, 1, None)
    with pipe:
        u = ops.random.Uniform()(range=(1, 2), shape=3)
        tr = ops.transforms.Translation(offset=u, device="cpu")
        pipe.set_outputs(tr(), u)
    pipe.build()
    matrices, offsets = pipe.run()
    assert len(matrices) == batch_size
    for i in range(len(matrices)):
        offset = offsets.at(i)
        matrix = matrices.at(i)
        assert offset.shape == (3,)
        for j in range(3):
            assert offset[j] >= 1 and offset[j] < 2  # check that it's not all zeros or sth
        T = offset[:, np.newaxis]  # convert to a column
        # translation matrix = identity with the offset as the last column
        assert np.array_equal(matrix, np.concatenate([np.identity(3), T], axis=1))
def test_compose():
    """ops.Compose chains transforms; nested Compose instances also work."""
    batch_size = 3
    pipe = Pipeline(batch_size, 1, None)
    u = ops.random.Uniform()(range=(1, 2), shape=3)
    c1 = ops.Compose([
        ops.transforms.Translation(offset=u),
        ops.transforms.Scale(scale=[1, 1, -1])
    ])
    c2 = ops.Compose([
        c1,
        ops.transforms.Rotation(angle=90, axis=[0, 0, 1])
    ])
    pipe.set_outputs(c2(fn.transforms.scale(scale=[2, 2, 2])), u)
    pipe.build()
    matrices, offsets = pipe.run()
    assert len(matrices) == batch_size
    for i in range(len(matrices)):
        offset = offsets.at(i)
        matrix = matrices.at(i)
        assert offset.shape == (3,)
        for j in range(3):
            assert offset[j] >= 1 and offset[j] < 2  # check that it's not all zeros or sth
        # expected linear part of the composed transform (rotation/flip applied
        # on top of the initial scale-by-2); the offset goes through it as well
        mtx = np.float32([[0, -1, 0],
                          [1, 0, 0],
                          [0, 0, -1]])
        T = offset[:, np.newaxis]  # convert to a column
        T = np.dot(mtx, T)
        mtx *= 2
        assert np.allclose(matrix, np.concatenate([mtx, T], axis=1), rtol=1e-5, atol=1e-6)
# Location of the Caffe LMDB test database inside the DALI_EXTRA checkout.
test_data_root = os.environ['DALI_EXTRA_PATH']
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
def test_compose_change_device():
    """Compose with a cpu->gpu transition inside should yield a GPU output."""
    batch_size = 3
    pipe = Pipeline(batch_size, 1, 0)
    size = fn.random.uniform(shape=2, range=(300, 500))
    c = ops.Compose([
        ops.decoders.Image(device="cpu"),
        ops.Resize(size=size, device="gpu")
    ])
    files, labels = fn.readers.caffe(path=caffe_db_folder, seed=1)
    # compare the composed branch against an explicitly built equivalent branch
    pipe.set_outputs(c(files), fn.resize(fn.decoders.image(files).gpu(), size=size))
    pipe.build()
    out = pipe.run()
    assert isinstance(out[0], dali.backend.TensorListGPU)
    test_utils.check_batch(out[0], out[1], batch_size=batch_size)
|
DALI-main
|
dali/test/python/operator_1/test_compose.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.types as types
from nose.tools import nottest
from nvidia.dali import pipeline_def
from test_utils import np_type_to_dali
import itertools
from nose2.tools import params
def ref_cast(x, dtype):
    """Reference cast: saturating (rounded + clipped) for integer targets,
    a plain ``astype`` otherwise."""
    if not np.issubdtype(dtype, np.integer):
        return x.astype(dtype)
    info = np.iinfo(dtype)
    values = np.round(x) if np.issubdtype(x.dtype, np.floating) else x
    return values.clip(info.min, info.max).astype(dtype)
def random_shape(rng, ndim: int, max_size: int):
    """Draw a random ``ndim``-element shape whose volume stays near ``max_size``."""
    if ndim == 0:
        return []
    # cap each extent so the product of extents is roughly bounded by max_size
    per_dim_cap = int(max_size ** (1 / ndim))
    return list(rng.integers(1, per_dim_cap, [ndim]))
def replace_with_empty_volumes(rng, input, empty_volume_policy):
    """Replaces samples with 0-volumed ones if possible.

    Parameters
    ----------
    rng :
        rng
    input : List of np.array
        Batch to process
    empty_volume_policy : str
        one of "left", "right", "middle", "mixed", "all", to indicate if the batch suffix,
        prefix, infix or all of them should be randomly replaced with 0-volumed samples

    Returns
    -------
    List of np.array
    """
    if empty_volume_policy is None:
        return input
    if len(input[0].shape) == 0:
        # scalar samples cannot have a zero extent
        return input
    if empty_volume_policy == "mixed":
        # apply left, middle and right replacements one after another
        left = replace_with_empty_volumes(rng, input, "left")
        left_and_mid = replace_with_empty_volumes(rng, left, "middle")
        return replace_with_empty_volumes(rng, left_and_mid, "right")
    # pick the [start, end) range of samples to clear for the remaining policies;
    # NOTE(review): "left"/"right"/"middle" assume len(input) is large enough that
    # the rng.integers bounds stay valid (callers use batch_size >= 12) — confirm
    if empty_volume_policy == "all":
        start = 0
        end = len(input)
    elif empty_volume_policy == "left":
        start = 0
        end = rng.integers(1, len(input) // 3)
    elif empty_volume_policy == "right":
        start = rng.integers(len(input) * 2 // 3, len(input) - 1)
        end = len(input)
    elif empty_volume_policy == "middle":
        start = rng.integers(1 + len(input) // 3, len(input) * 2 // 3)
        end = rng.integers(start + 1, len(input) - 1)
    for i in range(start, end):
        # zero the outermost extent, preserving dtype and remaining extents
        shape = list(input[i].shape)
        shape[0] = 0
        input[i] = np.zeros(dtype=input[i].dtype, shape=shape)
    return input
def generate(rng, ndim: int, batch_size: int, in_dtype: np.dtype, out_dtype: np.dtype,
             empty_volume_policy: str):
    """Generate a random batch of ``in_dtype`` samples suitable for cast testing.

    The value range is widened past ``out_dtype``'s limits (to exercise clipping)
    but clamped to what ``in_dtype`` can represent. Samples may be replaced with
    zero-volume ones according to ``empty_volume_policy``.
    """
    lo, hi = -1000, 1000
    if np.issubdtype(out_dtype, np.integer):
        lo = np.iinfo(out_dtype).min
        hi = np.iinfo(out_dtype).max
        if hi < np.iinfo(np.int64).max:
            # widen the range by 50% so some values overflow the target type
            r = hi - lo
            hi += r // 2
            lo -= r // 2
    # clamp to the representable range of the input type
    if np.issubdtype(in_dtype, np.integer):
        lo = max(np.iinfo(in_dtype).min, lo)
        hi = min(np.iinfo(in_dtype).max, hi)
    else:
        lo = max(-np.finfo(in_dtype).max, lo)
        hi = min(np.finfo(in_dtype).max, hi)
    max_size = 100000 // batch_size
    out = [rng.uniform(lo, hi, size=random_shape(rng, ndim, max_size)).astype(in_dtype) for _ in
           range(batch_size)]
    out = replace_with_empty_volumes(rng, out, empty_volume_policy)
    if np.issubdtype(in_dtype, np.floating) and np.issubdtype(out_dtype, np.integer):
        for x in out:
            # avoid exactly halfway numbers - rounding is different for CPU and GPU
            halfway = x[x - np.floor(x) == 0.5]
            # np.Infinity was removed in NumPy 2.0 - use np.inf
            x[x - np.floor(x) == 0.5] = np.nextafter(halfway, np.inf)
    return out
# Module-wide RNG with a fixed seed so generated test data is reproducible.
rng = np.random.default_rng(1234)
@nottest
def _test_operator_cast(ndim, batch_size, in_dtype, out_dtype, device, empty_volume_policy=None):
    """Run fn.cast on random data and compare against the saturating reference."""
    def src():
        return generate(rng, ndim, batch_size, in_dtype, out_dtype, empty_volume_policy)

    @pipeline_def(batch_size=batch_size, num_threads=4,
                  device_id=types.CPU_ONLY_DEVICE_ID if device == 'cpu' else 0)
    def cast_pipe():
        inp = fn.external_source(src)
        inp_dev = inp.gpu() if device == 'gpu' else inp
        return inp, fn.cast(inp_dev, dtype=np_type_to_dali(out_dtype))
    pipe = cast_pipe()
    pipe.build()
    for _ in range(10):
        inp, out = pipe.run()
        if device == 'gpu':
            out = out.as_cpu()
        ref = [ref_cast(np.array(x), out_dtype) for x in inp]
        # work around a bug in numpy: when the argument is a scalar fp32 or fp16, nextafter
        # promotes it to fp64, resulting in insufficient epsilon - we want an epsilon of the
        # type specified in out_dtype
        eps = 0 if np.issubdtype(out_dtype, np.integer) else \
            (np.nextafter(out_dtype([1]), 2) - 1.0)[0]
        for i in range(batch_size):
            if not np.allclose(out[i], ref[i], eps):
                # print diagnostics before failing the exact-equality assert
                matI = np.array(inp[i])
                matO = np.array(out[i])
                matR = ref[i]
                mask = np.logical_not(np.isclose(matO, matR, eps))
                print(f"At sample {i}:\nI:\n{matI}\nO\n{matO}\nR\n{matR}")
                print(f"Differences at {mask}:\nI:\n{matI[mask]}\nO\n{matO[mask]}\nR\n{matR[mask]}")
                print(f"Result: {np.count_nonzero(mask)} wrong values out of {mask.size}.")
                assert np.array_equal(out[i], ref[i])
def test_operator_cast():
    """Sweep cast over all dtype pairs on both backends (random ndim/batch)."""
    # renamed from `types` to avoid shadowing the nvidia.dali.types module import
    dtypes = [np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64, np.int64,
              np.float16, np.float32]
    for device in ['cpu', 'gpu']:
        for in_type in dtypes:
            for out_type in dtypes:
                ndim = rng.integers(0, 4)
                batch_size = rng.integers(1, 11)
                yield _test_operator_cast, ndim, batch_size, in_type, out_type, device
def test_operator_cast_empty_volumes():
    """Cast batches containing zero-volume samples (various placement policies)."""
    # renamed from `types` to avoid shadowing the nvidia.dali.types module import
    dtypes = [np.uint8, np.int32, np.float32]
    for device in ['cpu', 'gpu']:
        for in_type in dtypes:
            for out_type in dtypes:
                ndim = rng.integers(0, 4)
                batch_size = rng.integers(12, 64)
                for empty_volume_policy in [
                    rng.choice(["left", "right", "middle", "mixed"]), "all"
                ]:
                    yield (_test_operator_cast, ndim, batch_size, in_type, out_type, device,
                           empty_volume_policy)
@params(*itertools.product((('cpu', 'cpu'), ('gpu', 'cpu'), ('gpu', 'gpu')),
                           (np.uint8, np.int32, np.float32),
                           (np.uint8, np.int32, np.float32)))
def test_cast_like(devices, dtype_in, dtype_out):
    """fn.cast_like should produce output with the dtype of its second input."""
    @pipeline_def(batch_size=1, num_threads=4, device_id=0)
    def cast_pipe():
        device_left, device_right = devices
        data0 = fn.random.uniform(range=[0, 255], dtype=np_type_to_dali(dtype_in),
                                  device=device_left)
        data1 = fn.random.uniform(range=[0, 255], dtype=np_type_to_dali(dtype_out),
                                  device=device_right)
        return fn.cast_like(data0, data1)
    p = cast_pipe()
    p.build()
    out, = p.run()
    expected_type = np_type_to_dali(dtype_out)
    assert out.dtype == expected_type, f"{out.dtype} != {expected_type}"
|
DALI-main
|
dali/test/python/operator_1/test_cast.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import librosa
import numpy as np
import nvidia.dali.types as types
import test_utils
import os
import nvidia.dali.fn as fn
from nvidia.dali import pipeline_def
# All .wav files from the DALI_EXTRA audio test database.
audio_files = test_utils.get_files(os.path.join('db', 'audio', 'wav'), 'wav')
def trim_ref(cutoff_db, ref, frame_length, hop_length, input_data):
    """Reference nonsilent-region detection built on librosa.effects.trim.

    Returns (begin, length) as 0-d numpy arrays, adjusted to DALI's conventions.
    """
    _, index = librosa.effects.trim(y=input_data, top_db=-cutoff_db, ref=ref,
                                    frame_length=frame_length,
                                    hop_length=hop_length)
    # librosa's trim function calculates power with reference to center of window,
    # while DALI uses beginning of window. Hence the subtraction below
    begin = index[0] - frame_length // 2
    length = index[1] - index[0]
    if length != 0:
        length += frame_length - 1
    return np.array(begin), np.array(length)
@pipeline_def
def nonsilent_region_pipe(cutoff_value, window_size, reference_power, reset_interval):
    """Decode audio and run fn.nonsilent_region on both CPU and GPU backends.

    Outputs: (audio, begin_cpu, len_cpu, begin_gpu, len_gpu).
    """
    raw, _ = fn.readers.file(files=audio_files)
    audio, _ = fn.decoders.audio(raw, dtype=types.FLOAT, downmix=True)
    begin_cpu, len_cpu = fn.nonsilent_region(
        audio, cutoff_db=cutoff_value, window_length=window_size,
        reference_power=reference_power,
        reset_interval=reset_interval
    )
    begin_gpu, len_gpu = fn.nonsilent_region(
        audio.gpu(), cutoff_db=cutoff_value, window_length=window_size,
        reference_power=reference_power,
        reset_interval=reset_interval
    )
    return audio, begin_cpu, len_cpu, begin_gpu, len_gpu
def check_nonsilence_operator(batch_size, cutoff_value, window_size, reference_power,
                              reset_interval, eps):
    """Compare DALI nonsilent_region (CPU and GPU) against the librosa reference."""
    pipe = nonsilent_region_pipe(
        cutoff_value, window_size, reference_power, reset_interval,
        batch_size=batch_size, num_threads=3, device_id=0, seed=42,
    )
    hop_length = 1
    # librosa needs an explicit reference power; fall back to the max like DALI
    ref = np.max if not reference_power else reference_power
    pipe.build()
    for _ in range(3):
        audio_batch_cpu, begin_batch_cpu, len_batch_cpu, begin_batch_gpu, len_batch_gpu = pipe.run()
        for s in range(batch_size):
            audio_cpu = test_utils.as_array(audio_batch_cpu[s])
            begin_cpu = test_utils.as_array(begin_batch_cpu[s])
            len_cpu = test_utils.as_array(len_batch_cpu[s])
            begin_gpu = test_utils.as_array(begin_batch_gpu[s])
            len_gpu = test_utils.as_array(len_batch_gpu[s])
            ref_begin, ref_len = trim_ref(
                cutoff_value, ref, window_size, hop_length, audio_cpu
            )
            # both backends must stay within eps of the reference...
            np.testing.assert_allclose(ref_begin, begin_cpu, atol=eps)
            np.testing.assert_allclose(ref_begin, begin_gpu, atol=eps)
            np.testing.assert_allclose(ref_len, len_cpu, atol=eps)
            np.testing.assert_allclose(ref_len, len_gpu, atol=eps)
            # ...and much closer to each other
            np.testing.assert_allclose(begin_cpu, begin_gpu, atol=1)
            np.testing.assert_allclose(len_cpu, len_gpu, atol=10)
def test_nonsilence_operator():
    """Sweep nonsilent_region over window/reset/reference/cutoff combinations."""
    batch_size = 3
    for ws in (512, 1024):
        for ri in (-1, 2048, 8192):
            for rp in (None, .0003):
                for cc in (-10, -60, -80):
                    # the tolerance (eps) equals the window size
                    yield check_nonsilence_operator, batch_size, cc, ws, rp, ri, ws
def test_cpu_vs_gpu():
    """Check CPU and GPU nonsilent_region agree on synthetic and real audio."""
    batch_size = 8

    @pipeline_def
    def nonsilent_pipe(data_arr=None, window_size=256, cutoff_value=-10, reference_power=None):
        if data_arr is None:
            # real audio from the test database
            raw, _ = fn.readers.file(files=audio_files)
            audio, _ = fn.decoders.audio(raw, dtype=types.INT16, downmix=True)
        else:
            # synthetic constant input provided by the caller
            audio = types.Constant(device='cpu', value=data_arr)
        begin_cpu, len_cpu = fn.nonsilent_region(
            audio, cutoff_db=cutoff_value, window_length=window_size,
            reference_power=reference_power,
        )
        begin_gpu, len_gpu = fn.nonsilent_region(
            audio.gpu(), cutoff_db=cutoff_value, window_length=window_size,
            reference_power=reference_power,
        )
        return begin_cpu, len_cpu, begin_gpu, len_gpu

    # single non-zero sample at index 10
    audio_arr = np.zeros([10 + 1 + 10], dtype=np.int16)
    audio_arr[10] = 3000
    pipe = nonsilent_pipe(data_arr=audio_arr, window_size=1, cutoff_value=-80,
                          batch_size=1, num_threads=3, device_id=0)
    pipe.build()
    begin_cpu, len_cpu, begin_gpu, len_gpu = [test_utils.as_array(out[0]) for out in pipe.run()]
    assert begin_cpu == begin_gpu == 10
    assert len_cpu == len_gpu == 1
    # a run of 5 non-zero samples
    audio_arr[10:15] = 3000
    pipe = nonsilent_pipe(data_arr=audio_arr, window_size=1,
                          batch_size=1, num_threads=3, device_id=0)
    pipe.build()
    begin_cpu, len_cpu, begin_gpu, len_gpu = [test_utils.as_array(out[0]) for out in pipe.run()]
    assert begin_cpu == begin_gpu == 10
    assert len_cpu == len_gpu == 5
    # wider window moves the detected start earlier by window - 1 samples
    window = 5
    pipe = nonsilent_pipe(data_arr=audio_arr, window_size=5,
                          batch_size=1, num_threads=3, device_id=0)
    pipe.build()
    outputs = pipe.run()
    begin_cpu, len_cpu, begin_gpu, len_gpu = [test_utils.as_array(out[0]) for out in outputs]
    assert begin_cpu == begin_gpu == (10 - window + 1)
    assert len_cpu == len_gpu == 13
    # finally, compare both backends on real audio files
    pipe = nonsilent_pipe(batch_size=batch_size, num_threads=3, device_id=0, seed=42)
    pipe.build()
    for _ in range(3):
        begin_batch_cpu, len_batch_cpu, begin_batch_gpu, len_batch_gpu = pipe.run()
        for s in range(batch_size):
            begin_cpu = test_utils.as_array(begin_batch_cpu[s])
            len_cpu = test_utils.as_array(len_batch_cpu[s])
            begin_gpu = test_utils.as_array(begin_batch_gpu[s])
            len_gpu = test_utils.as_array(len_batch_gpu[s])
            np.testing.assert_array_equal(begin_cpu, begin_gpu)
            np.testing.assert_array_equal(len_cpu, len_gpu)
|
DALI-main
|
dali/test/python/operator_1/test_nonsilence.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali.ops as ops
from nvidia.dali.types import Constant
import numpy as np
from nose.tools import assert_equal
from nose_utils import assert_raises
def test_group_inputs():
    """ops._group_inputs should split mixed inputs into edges, ints and reals."""
    e0 = ops._DataNode("op0", "cpu")
    e1 = ops._DataNode("op1", "cpu")
    inputs = [e0, e1, 10.0, Constant(0).uint8(), 42]
    cat_idx, edges, integers, reals = ops._group_inputs(inputs)
    assert_equal([("edge", 0), ("edge", 1), ("real", 0),
                  ("integer", 0), ("integer", 1)], cat_idx)
    assert_equal([e0, e1], edges)
    assert_equal([Constant(0).uint8(), 42], integers)
    assert_equal([10.0], reals)
    # use the builtin complex(); the np.complex alias was removed in NumPy 1.24
    assert_raises(TypeError, ops._group_inputs, [complex()],
                  glob="Expected scalar value of type 'bool', 'int' or 'float', got *.")
    # the reals slot collapses to None when no real scalars are present
    _, _, _, none_reals = ops._group_inputs([e0, 10])
    assert_equal(None, none_reals)
def test_generate_input_desc():
    """Check placeholder descriptors generated for edge and scalar inputs."""
    assert_equal("&0", ops._generate_input_desc([("edge", 0)], [], []))
    assert_equal("&0 &1 &2",
                 ops._generate_input_desc([("edge", 0), ("edge", 1), ("edge", 2)], [], []))
    # mixed integer constants and an edge, out of order
    assert_equal("$1:int32 $0:uint8 &0",
                 ops._generate_input_desc([("integer", 1), ("integer", 0), ("edge", 0)],
                                          [Constant(42).uint8(), 42], []))
    c = Constant(42)
    # every supported integer width
    int_categories = [("integer", i) for i in range(9)]
    int_values = [int(), c.uint8(), c.uint16(), c.uint32(), c.uint64(),
                  c.int8(), c.int16(), c.int32(), c.int64()]
    assert_equal(
        "$0:int32 $1:uint8 $2:uint16 $3:uint32 $4:uint64 $5:int8 $6:int16 $7:int32 $8:int64",
        ops._generate_input_desc(int_categories, int_values, []))
    # every supported floating-point width
    real_categories = [("real", i) for i in range(4)]
    real_values = [float(), c.float16(), c.float32(), c.float64()]
    assert_equal("$0:float32 $1:float16 $2:float32 $3:float64",
                 ops._generate_input_desc(real_categories, [], real_values))
|
DALI-main
|
dali/test/python/operator_1/test_ops_expression_internals.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali as dali
import nvidia.dali.fn as fn
import numpy as np
def test_cat_numpy_array():
    """A raw numpy array passed to fn.cat is promoted to a constant input."""
    pipe = dali.pipeline.Pipeline(1, 1, None)
    sample = np.array([[10, 11], [12, 13]], dtype=np.float32)
    src = fn.external_source([[sample]])
    extra_col = np.array([[20], [21]], dtype=np.float32)
    pipe.set_outputs(fn.cat(src, extra_col, axis=1))
    pipe.build()
    outputs = pipe.run()
    assert np.array_equal(outputs[0].at(0), np.array([[10, 11, 20], [12, 13, 21]]))
def test_stack_numpy_scalar():
    """Numpy input promotion in stacking.

    NOTE(review): despite the name, this body is identical to
    test_cat_numpy_array and calls fn.cat, not fn.stack - looks like a
    copy-paste leftover; confirm the intended operator and expected output.
    """
    pipe = dali.pipeline.Pipeline(1, 1, None)
    src = fn.external_source([[np.array([[10, 11], [12, 13]], dtype=np.float32)]])
    pipe.set_outputs(fn.cat(src, np.array([[20], [21]], dtype=np.float32), axis=1))
    pipe.build()
    o = pipe.run()
    assert np.array_equal(o[0].at(0), np.array([[10, 11, 20], [12, 13, 21]]))
def test_slice_fn():
    """Numpy arrays used as positional anchor/shape inputs to fn.slice are promoted."""
    pipe = dali.pipeline.Pipeline(1, 1, 0)
    data = np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]], dtype=np.float32)
    src = fn.external_source([[data]])
    out_cpu = fn.slice(src, np.array([1, 1]), np.array([2, 1]), axes=[0, 1])
    out_gpu = fn.slice(src.gpu(), np.array([1, 1]), np.array([2, 1]), axes=[0, 1])
    pipe.set_outputs(out_cpu, out_gpu)
    pipe.build()
    outputs = pipe.run()
    expected = np.array([[14], [17]])
    assert np.array_equal(outputs[0].at(0), expected)
    assert np.array_equal(outputs[1].as_cpu().at(0), expected)
def test_slice_ops():
    """ops.Slice accepts numpy arrays as positional anchor/shape inputs."""
    pipe = dali.pipeline.Pipeline(1, 1, 0)
    data = np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]], dtype=np.float32)
    src = fn.external_source([[data]])
    slice_cpu = dali.ops.Slice(axes=[0, 1], device="cpu")
    slice_gpu = dali.ops.Slice(axes=[0, 1], device="gpu")
    out_cpu = slice_cpu(src, np.array([1, 1]), np.array([2, 1]))
    out_gpu = slice_gpu(src.gpu(), np.array([1, 1]), np.array([2, 1]))
    pipe.set_outputs(out_cpu, out_gpu)
    pipe.build()
    outputs = pipe.run()
    expected = np.array([[14], [17]])
    assert np.array_equal(outputs[0].at(0), expected)
    assert np.array_equal(outputs[1].as_cpu().at(0), expected)
def test_python_function():
    """A numpy array input to fn.python_function is promoted to a constant batch."""
    pipe = dali.pipeline.Pipeline(3, 1, 0, exec_async=False, exec_pipelined=False)
    with pipe:
        def square_batch(samples):
            return [s * s for s in samples]
        squared = fn.python_function(np.array([[1, 2], [3, 4]]),
                                     function=square_batch, batch_processing=True)
        pipe.set_outputs(squared)
    pipe.build()
    outputs = pipe.run()
    assert np.array_equal(outputs[0].at(0), np.array([[1, 4], [9, 16]]))
def test_arithm_ops():
    """Arithmetic between a DataNode and numpy arrays/scalars promotes the constants."""
    pipe = dali.pipeline.Pipeline(1, 1, None)
    with pipe:
        in1 = fn.external_source([[np.uint8([[1, 2], [3, 4]])]])
        pipe.set_outputs(in1 + np.array([[10, 20], [30, 40]]),
                         in1 + np.array(5),
                         in1 + np.uint8(100))
    pipe.build()
    outputs = pipe.run()
    expectations = [np.array([[11, 22], [33, 44]]),
                    np.array([[6, 7], [8, 9]]),
                    np.array([[101, 102], [103, 104]])]
    for produced, expected in zip(outputs, expectations):
        assert np.array_equal(produced.at(0), expected)
def test_arg_input():
    """Named (keyword) argument inputs must be promoted from numpy arrays."""
    pipe = dali.pipeline.Pipeline(1, 1, None)
    with pipe:
        matrices = fn.external_source([[np.float32([[1, 2, 3], [4, 5, 6]])]])
        translated = fn.transforms.translation(matrices, offset=np.float32([10, 20]))
        pipe.set_outputs(translated)
    pipe.build()
    out = pipe.run()
    assert np.array_equal(out[0].at(0), np.array([[1, 2, 13], [4, 5, 26]]))
|
DALI-main
|
dali/test/python/operator_1/test_input_promotion.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import os
from functools import partial
from nvidia.dali import pipeline_def
from nvidia.dali.pipeline import Pipeline
from nose2.tools import params
from nose_utils import assert_raises
from nose_utils import raises
from test_slice import check_slice_output, abs_slice_start_and_end
from test_utils import RandomDataIterator
from test_utils import as_array
from test_utils import compare_pipelines, dali_type_to_np
from test_utils import get_dali_extra_path
import itertools
# Root of the DALI_extra test-data checkout.
test_data_root = get_dali_extra_path()
# Caffe LMDB database used by the reader-based pipelines below.
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
# (operator, device) pairs used to parameterize the tests:
# the functional API on both backends...
fn_dev_pairs = [(fn.crop_mirror_normalize, 'cpu'),
                (fn.crop_mirror_normalize, 'gpu')]
# ...and the class (ops) API on both backends.
op_dev_pairs = [(ops.CropMirrorNormalize, 'cpu'),
                (ops.CropMirrorNormalize, 'gpu')]
def next_power_of_two(x):
    """Return the smallest power of two that is >= x (1 for x == 0)."""
    if x == 0:
        return 1
    return 1 << (x - 1).bit_length()
class CropMirrorNormalizePipeline(Pipeline):
    """Reads Caffe LMDB samples, decodes on CPU and applies CropMirrorNormalize.

    The operator class is supplied via ``cmn_op`` and runs on ``device``.
    The crop window is fixed to 224x224 anchored at normalized (0.3, 0.2);
    mirroring is driven by a seeded CoinFlip so runs are reproducible.
    """
    def __init__(self, cmn_op, device, batch_size, num_threads=1, device_id=0, num_gpus=1,
                 dtype=types.FLOAT, output_layout="HWC", mirror_probability=0.0,
                 mean=[0., 0., 0.], std=[1., 1., 1.], scale=None, shift=None, pad_output=False):
        super().__init__(batch_size, num_threads, device_id, seed=7865)
        self.device = device
        self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
                                       num_shards=num_gpus)
        self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
        self.cmn = cmn_op(device=self.device,
                          dtype=dtype,
                          output_layout=output_layout,
                          crop=(224, 224),
                          crop_pos_x=0.3,
                          crop_pos_y=0.2,
                          mean=mean,
                          std=std,
                          scale=scale,
                          shift=shift,
                          pad_output=pad_output)
        # Same seed as the reference PythonOpPipeline so mirror flags match.
        self.coin = ops.random.CoinFlip(probability=mirror_probability, seed=7865)

    def define_graph(self):
        """read -> decode -> (optional copy to GPU) -> CMN with per-sample mirror flag."""
        inputs, labels = self.input(name="Reader")
        images = self.decode(inputs)
        if self.device == 'gpu':
            images = images.gpu()
        rng = self.coin()
        images = self.cmn(images, mirror=rng)
        return images
class NoCropPipeline(Pipeline):
    """Decode followed by either CMN without crop args or a plain float cast.

    Used to verify that CropMirrorNormalize with no cropping/normalization
    arguments reduces to a cast (``decoder_only=True`` builds the reference).
    """
    def __init__(self, cmn_op, device, batch_size, num_threads=1, device_id=0, num_gpus=1,
                 decoder_only=False):
        super(NoCropPipeline, self).__init__(batch_size, num_threads, device_id)
        self.decoder_only = decoder_only
        self.device = device
        self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
                                       num_shards=num_gpus)
        self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
        # Either the operator under test (no crop args) or the reference Cast.
        if not self.decoder_only:
            self.cast = cmn_op(device=self.device, dtype=types.FLOAT,
                               output_layout="HWC")
        else:
            self.cast = ops.Cast(device=self.device, dtype=types.FLOAT)

    def define_graph(self):
        """read -> decode -> (optional copy to GPU) -> cast/CMN."""
        inputs, labels = self.input(name="Reader")
        images = self.decode(inputs)
        if self.device == 'gpu':
            images = images.gpu()
        images = self.cast(images)
        return images
def check_cmn_no_crop_args_vs_decoder_only(cmn_op, device, batch_size):
    """CMN without crop/normalization args must match a plain cast to float."""
    reference = NoCropPipeline(cmn_op, device, batch_size, decoder_only=True)
    tested = NoCropPipeline(cmn_op, device, batch_size, decoder_only=False)
    compare_pipelines(reference, tested, batch_size=batch_size, N_iterations=3)
def test_cmn_no_crop_args_vs_decoder_only():
    """Nose generator: no-crop CMN vs decoder + cast, for both APIs and devices."""
    for cmn_op, device in op_dev_pairs:
        # Use a list (the original used a set literal) so the generated test
        # order is deterministic and consistent with the sibling tests.
        for batch_size in [1, 4]:
            yield check_cmn_no_crop_args_vs_decoder_only, cmn_op, device, batch_size
class PythonOpPipeline(Pipeline):
    """Reference pipeline applying a numpy ``function`` via ops.PythonFunction.

    The function receives the decoded image and the CoinFlip mirror flag.
    Async/pipelined execution is disabled as required by PythonFunction.
    """
    def __init__(self, batch_size, function, output_layout, mirror_probability, num_threads=1,
                 device_id=0, num_gpus=1):
        super().__init__(batch_size, num_threads, device_id, seed=7865, exec_async=False,
                         exec_pipelined=False)
        self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
                                       num_shards=num_gpus)
        self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
        self.cmn = ops.PythonFunction(function=function, output_layouts=output_layout)
        # Same seed as CropMirrorNormalizePipeline so mirror flags match per sample.
        self.coin = ops.random.CoinFlip(probability=mirror_probability, seed=7865)

    def define_graph(self):
        """read -> decode -> python function(image, mirror_flag)."""
        inputs, labels = self.input(name="Reader")
        images = self.decode(inputs)
        images = self.cmn(images, self.coin())
        return images
def crop_mirror_normalize_func(crop_z, crop_y, crop_x,
                               crop_d, crop_h, crop_w,
                               should_pad, mean, std,
                               scale, shift,
                               input_layout, output_layout, dtype, image, should_flip):
    """Numpy reference implementation of CropMirrorNormalize.

    Crops a (crop_d x) crop_h x crop_w window anchored at normalized positions
    (crop_z, crop_y, crop_x), optionally flips along W, normalizes with
    (x - mean) / std * scale + shift, optionally pads channels to the next
    power of two and transposes to ``output_layout``.

    `image` is the input array, `should_flip` the per-sample mirror flag.
    Returns a float32 array; for integer `dtype` the values are clipped to the
    type's range (but not cast — the caller compares with a tolerance).
    """
    scale = scale or 1
    shift = shift or 0
    assert input_layout == "HWC" or input_layout == "FHWC" \
        or input_layout == "DHWC" or input_layout == "FDHWC"
    assert len(input_layout) == len(image.shape)
    # Extract the extent of each named dimension from the input layout.
    assert input_layout.count('H') > 0
    dim_h = input_layout.find('H')
    H = image.shape[dim_h]
    assert input_layout.count('W') > 0
    dim_w = input_layout.find('W')
    W = image.shape[dim_w]
    assert input_layout.count('C') > 0
    dim_c = input_layout.find('C')
    C = image.shape[dim_c]
    D = 1
    if input_layout.count('D') > 0:
        dim_d = input_layout.find('D')
        D = image.shape[dim_d]
        assert D >= crop_d
    F = 1
    if input_layout.count('F') > 0:
        dim_f = input_layout.find('F')
        F = image.shape[dim_f]
    assert H >= crop_h and W >= crop_w
    # Normalized anchor -> absolute window (rounded to nearest, float32 math
    # to match the operator's arithmetic).
    start_y = int(np.float32(crop_y) * np.float32(H - crop_h) + np.float32(0.5))
    end_y = start_y + crop_h
    start_x = int(np.float32(crop_x) * np.float32(W - crop_w) + np.float32(0.5))
    end_x = start_x + crop_w
    if input_layout.count('D') > 0:
        assert D >= crop_d
        start_z = int(np.float32(crop_z) * np.float32(D - crop_d) + np.float32(0.5))
        end_z = start_z + crop_d
    # Crop
    if input_layout == "HWC":
        out = image[start_y:end_y, start_x:end_x, :]
        H, W = out.shape[0], out.shape[1]
    elif input_layout == "FHWC":
        out = image[:, start_y:end_y, start_x:end_x, :]
        H, W = out.shape[1], out.shape[2]
    elif input_layout == "DHWC":
        out = image[start_z:end_z, start_y:end_y, start_x:end_x, :]
        D, H, W = out.shape[0], out.shape[1], out.shape[2]
    elif input_layout == "FDHWC":
        out = image[:, start_z:end_z, start_y:end_y, start_x:end_x, :]
        D, H, W = out.shape[1], out.shape[2], out.shape[3]
    # Default and broadcast mean/std to one value per channel.
    if not mean:
        mean = [0.0]
    if not std:
        std = [1.0]
    if len(mean) == 1:
        mean = C * mean
    if len(std) == 1:
        std = C * std
    assert len(mean) == C and len(std) == C
    inv_std = [np.float32(1.0) / np.float32(std[c]) for c in range(C)]
    mean = np.float32(mean)
    # Mirror: horizontal flip along the W dimension when requested.
    assert input_layout.count('W') > 0
    horizontal_dim = input_layout.find('W')
    out1 = np.flip(out, horizontal_dim) if should_flip else out
    # Pad, normalize, transpose
    out_C = next_power_of_two(C) if should_pad else C
    if input_layout == "HWC":
        out2 = np.zeros([H, W, out_C], dtype=np.float32)
        out2[:, :, 0:C] = (np.float32(out1) - mean) * inv_std * scale + shift
        ret = np.transpose(out2, (2, 0, 1)) if output_layout == "CHW" else out2
    elif input_layout == "FHWC":
        out2 = np.zeros([F, H, W, out_C], dtype=np.float32)
        out2[:, :, :, 0:C] = (np.float32(out1) - mean) * inv_std * scale + shift
        ret = np.transpose(out2, (0, 3, 1, 2)) if output_layout == "FCHW" else out2
    elif input_layout == "DHWC":
        out2 = np.zeros([D, H, W, out_C], dtype=np.float32)
        out2[:, :, :, 0:C] = (np.float32(out1) - mean) * inv_std * scale + shift
        ret = np.transpose(out2, (3, 0, 1, 2)) if output_layout == "CDHW" else out2
    elif input_layout == "FDHWC":
        out2 = np.zeros([F, D, H, W, out_C], dtype=np.float32)
        out2[:, :, :, :, 0:C] = (np.float32(out1) - mean) * inv_std * scale + shift
        ret = np.transpose(out2, (0, 4, 1, 2, 3)) if output_layout == "FCDHW" else out2
    else:
        raise RuntimeError("The test function received unsupported layout {}".format(input_layout))
    # clamp the result to output type's dynamic range
    if np.issubdtype(dtype, np.integer):
        lo = np.iinfo(dtype).min
        hi = np.iinfo(dtype).max
        ret = np.clip(ret, lo, hi)
    return ret
def check_cmn_vs_numpy(cmn_op, device, batch_size, dtype, output_layout,
                       mirror_probability, mean, std, scale, shift, should_pad):
    """Compare the native CMN operator against the numpy reference implementation."""
    anchor_zyx = (0.1, 0.2, 0.3)
    window_dhw = (10, 224, 224)
    reference_fn = partial(crop_mirror_normalize_func,
                           *anchor_zyx, *window_dhw,
                           should_pad,
                           mean, std, scale, shift,
                           "HWC", output_layout, dali_type_to_np(dtype))
    # More iterations for tiny batches so enough samples are covered.
    n_iters = 8 if batch_size == 1 else 1
    eps, max_err = (1e-5, 1e-5) if dtype == types.FLOAT else (0.3, 0.6)
    native = CropMirrorNormalizePipeline(
        cmn_op, device, batch_size, dtype=dtype, output_layout=output_layout,
        mirror_probability=mirror_probability, mean=mean, std=std,
        scale=scale, shift=shift, pad_output=should_pad)
    reference = PythonOpPipeline(batch_size, reference_fn, output_layout, mirror_probability)
    compare_pipelines(native, reference, batch_size=batch_size,
                      N_iterations=n_iters, eps=eps, max_allowed_error=max_err)
def test_cmn_vs_numpy():
    """Nose generator: native CMN vs the numpy reference over many parameter combos.

    NOTE: mean/std and dtype/scale/shift are drawn from the seeded global RNG
    inside the innermost loop, so the generated cases depend on the exact loop
    nesting order — keep it stable.
    """
    norm_data = [
        ([0., 0., 0.], [1., 1., 1.]),
        ([0.5 * 255], [0.225 * 255]),
        ([0.485 * 255, 0.456 * 255, 0.406 * 255], [0.229 * 255, 0.224 * 255, 0.225 * 255])
    ]
    # (dtype, scale, shift); scale/shift are only applied when the drawn
    # std/mean values are > 1 (see below).
    type_scale_shift = [
        (types.FLOAT, None, None),
        (types.FLOAT16, None, None),
        (types.UINT8, 64, 128),
        (types.INT8, 50, 5)
    ]
    np.random.seed(12321)
    for cmn_op, device in op_dev_pairs:
        for batch_size in [1, 4]:
            for output_layout in ["HWC", "CHW"]:
                # Single-sample batches test both deterministic mirror settings.
                mirror_probs = [0.5] if batch_size > 1 else [0.0, 1.0]
                for mirror_probability in mirror_probs:
                    for should_pad in [False, True]:
                        mean, std = norm_data[np.random.randint(0, len(norm_data))]
                        dtype, default_scale, default_shift = type_scale_shift[
                            np.random.randint(0, len(type_scale_shift))]
                        shift = default_shift if mean and mean[0] > 1 else None
                        scale = default_scale if std and std[0] > 1 else None
                        yield check_cmn_vs_numpy, cmn_op, device, batch_size, dtype, \
                            output_layout, mirror_probability, mean, std, scale, shift, should_pad
class CMNRandomDataPipeline(Pipeline):
    """CMN pipeline fed from an external-source iterator of random data.

    Supports 2D/3D/sequence layouts, optionally treating the sequence (F)
    dimension as depth (``crop_seq_as_depth``), out-of-bounds policies and
    custom fill values. With ``extra_outputs=True`` it also returns the raw
    input and the per-sample mirror flag so tests can build a reference.
    """
    def __init__(self, cmn_op, device, batch_size, layout, iterator, num_threads=1, device_id=0,
                 num_gpus=1, dtype=types.FLOAT, output_layout="FHWC", mirror_probability=0.0,
                 mean=[0., 0., 0.], std=[1., 1., 1.], scale=None, shift=None, pad_output=False,
                 crop_seq_as_depth=False, crop_d=8, crop_h=16, crop_w=32, crop_pos_x=0.3,
                 crop_pos_y=0.2, crop_pos_z=0.1, out_of_bounds_policy=None, fill_values=None,
                 extra_outputs=False):
        super().__init__(batch_size, num_threads, device_id)
        self.device = device
        self.layout = layout
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.extra_outputs = extra_outputs
        # Only crop depth when there is a real D dimension, or when the F
        # dimension should be treated as depth.
        if layout.count('D') <= 0 and not (crop_seq_as_depth and layout.count('F') > 0):
            crop_d = None
        self.cmn = cmn_op(device=self.device, dtype=dtype,
                          output_layout=output_layout, crop_d=crop_d,
                          crop_h=crop_h, crop_w=crop_w, crop_pos_x=crop_pos_x,
                          crop_pos_y=crop_pos_y, crop_pos_z=crop_pos_z, mean=mean,
                          std=std, pad_output=pad_output, scale=scale, shift=shift,
                          out_of_bounds_policy=out_of_bounds_policy,
                          fill_values=fill_values)
        self.coin = ops.random.CoinFlip(probability=mirror_probability, seed=7865)

    def define_graph(self):
        """external source -> (optional copy to GPU) -> CMN with mirror flag."""
        self.data = self.inputs()
        random_data = self.data.gpu() if self.device == 'gpu' else self.data
        rng = self.coin()
        out = self.cmn(random_data, mirror=rng)
        if self.extra_outputs:
            return out, random_data, rng
        else:
            return out

    def iter_setup(self):
        # Feed the next batch from the external iterator every iteration.
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
class CMNRandomDataPythonOpPipeline(Pipeline):
    """Reference pipeline: external-source random data through a numpy function.

    ``function`` receives the input sample and the CoinFlip mirror flag
    (same seed as CMNRandomDataPipeline, so flags match sample-for-sample).
    """
    def __init__(self, function, batch_size, layout, output_layout, mirror_probability, iterator,
                 num_threads=1, device_id=0):
        super().__init__(batch_size, num_threads, device_id, exec_async=False, exec_pipelined=False)
        self.layout = layout
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.cmn = ops.PythonFunction(function=function, output_layouts=output_layout)
        self.coin = ops.random.CoinFlip(probability=mirror_probability, seed=7865)

    def define_graph(self):
        """external source -> python function(data, mirror_flag)."""
        self.data = self.inputs()
        out = self.cmn(self.data, self.coin())
        return out

    def iter_setup(self):
        # Feed the next batch from the external iterator every iteration.
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
def check_cmn_random_data_vs_numpy(cmn_op, device, batch_size, dtype, input_layout, input_shape,
                                   output_layout, mirror_probability, mean, std, scale, shift,
                                   should_pad):
    """Compare CMN on random data of an arbitrary layout with the numpy reference."""
    anchor_zyx = (0.1, 0.2, 0.3)
    window_dhw = (8, 16, 32)
    native_data = RandomDataIterator(batch_size, shape=input_shape)
    reference_data = RandomDataIterator(batch_size, shape=input_shape)
    reference_fn = partial(crop_mirror_normalize_func,
                           *anchor_zyx, *window_dhw,
                           should_pad,
                           mean, std, scale, shift, input_layout, output_layout,
                           dali_type_to_np(dtype))
    native = CMNRandomDataPipeline(cmn_op, device, batch_size, input_layout,
                                   iter(native_data),
                                   dtype=dtype, output_layout=output_layout,
                                   mirror_probability=mirror_probability, mean=mean, std=std,
                                   scale=scale, shift=shift, pad_output=should_pad)
    reference = CMNRandomDataPythonOpPipeline(reference_fn, batch_size, input_layout,
                                              output_layout, mirror_probability,
                                              iter(reference_data))
    eps, max_err = (1e-5, 1e-5) if dtype == types.FLOAT else (0.3, 0.6)
    compare_pipelines(native, reference, batch_size, 2, eps=eps, max_allowed_error=max_err)
def test_cmn_random_data_vs_numpy():
    """Nose generator: CMN on random data for every supported layout, vs numpy.

    NOTE: mean/std and dtype/scale/shift are drawn from the seeded global RNG
    in the innermost loop; the generated cases depend on loop order.
    """
    # (mean, std) combinations, including None and broadcast (length-1) forms.
    norm_data = [
        ([0., 0., 0.], [1., 1., 1.]),
        ([0.485 * 255, 0.456 * 255, 0.406 * 255], [0.229 * 255, 0.224 * 255, 0.225 * 255]),
        ([0.485 * 255, 0.456 * 255, 0.406 * 255], None),
        ([0.485 * 255, 0.456 * 255, 0.406 * 255], [255.0, ]),
        (None, [0.229 * 255, 0.224 * 255, 0.225 * 255]),
        ([128, ], [0.229 * 255, 0.224 * 255, 0.225 * 255])
    ]
    # Valid output layouts (planar and interleaved) per input layout.
    output_layouts = {
        "HWC": ["HWC", "CHW"],
        "FHWC": ["FHWC", "FCHW"],
        "DHWC": ["DHWC", "CDHW"],
        "FDHWC": ["FDHWC", "FCDHW"]
    }
    input_shapes = {
        "HWC": [(60, 80, 3)],
        "FHWC": [(3, 60, 80, 3)],
        "DHWC": [(10, 60, 80, 3)],
        "FDHWC": [(3, 10, 60, 80, 3)]
    }
    np.random.seed(12345)
    type_scale_shift = [(types.FLOAT, None, None),
                        (types.FLOAT16, None, None),
                        (types.UINT8, 64, 128),
                        (types.INT8, 50, 5)]
    for cmn_op, device in op_dev_pairs:
        for batch_size in [1, 4]:
            for input_layout in ["HWC", "FHWC", "DHWC", "FDHWC"]:
                for input_shape in input_shapes[input_layout]:
                    assert len(input_layout) == len(input_shape)
                    for output_layout in output_layouts[input_layout]:
                        # Single-sample batches test both deterministic mirrors.
                        mirror_probs = [0.5] if batch_size > 1 else [0.0, 1.0]
                        for mirror_probability in mirror_probs:
                            for should_pad in [False, True]:
                                mean, std = norm_data[np.random.randint(0, len(norm_data))]
                                dtype, default_scale, default_shift = type_scale_shift[
                                    np.random.randint(0, len(type_scale_shift))]
                                shift = default_shift if mean and mean[0] > 1 else None
                                scale = default_scale if std and std[0] > 1 else None
                                yield check_cmn_random_data_vs_numpy, \
                                    cmn_op, device, batch_size, dtype, input_layout, \
                                    input_shape, output_layout, mirror_probability, \
                                    mean, std, scale, shift, should_pad
def check_cmn_crop_sequence_length(cmn_op, device, batch_size, dtype, input_layout, input_shape,
                                   output_layout, mirror_probability, mean, std, should_pad):
    """With crop_seq_as_depth=True the frame (F) dimension is cropped like depth."""
    crop_d, crop_h, crop_w = 8, 16, 32
    data_iter = RandomDataIterator(batch_size, shape=input_shape)
    pipe = CMNRandomDataPipeline(cmn_op, device, batch_size, input_layout, iter(data_iter),
                                 dtype=dtype, output_layout=output_layout,
                                 mirror_probability=mirror_probability, mean=mean, std=std,
                                 pad_output=should_pad, crop_seq_as_depth=True)
    pipe.build()
    out_batch = pipe.run()[0]
    if output_layout == "FCHW":
        expected_shape = (crop_d, 3, crop_h, crop_w)
    else:
        expected_shape = (crop_d, crop_h, crop_w, 3)
    for sample_idx in range(batch_size):
        sh = as_array(out_batch[sample_idx]).shape
        assert sh == expected_shape, \
            "Shape mismatch {} != {}".format(sh, expected_shape)
def test_cmn_crop_sequence_length():
    """Tests cropping along the sequence dimension as if it was depth.

    (Removed a dead ``output_layouts`` list assignment that was immediately
    shadowed by the dict below.)
    """
    input_layout = "FHWC"
    output_layouts = {
        "FHWC": ["FHWC", "FCHW"],
    }
    input_shapes = {
        "FHWC": [(10, 60, 80, 3)],
    }
    mean = [127, ]
    std = [127, ]
    should_pad = False
    mirror_probability = 0.5
    for cmn_op, device in op_dev_pairs:
        for batch_size in [8]:
            for dtype in [types.FLOAT]:
                for input_shape in input_shapes[input_layout]:
                    assert len(input_layout) == len(input_shape)
                    for output_layout in output_layouts[input_layout]:
                        yield check_cmn_crop_sequence_length, cmn_op, device, batch_size, dtype, \
                            input_layout, input_shape, output_layout, mirror_probability, \
                            mean, std, should_pad
def check_cmn_with_out_of_bounds_policy_support(
        cmn_op, device, batch_size, dtype, input_layout, input_shape, output_layout,
        mirror_probability, mean, std, should_pad,
        out_of_bounds_policy=None, fill_values=(0x76, 0xb9, 0x00)):
    """Run CMN with a crop window twice the input size and check each sample
    with check_slice_output (which validates padding/trimming per policy).
    """
    # This test case is written with HWC layout in mind and "HW" axes in slice arguments
    assert input_layout == "HWC"
    assert len(input_shape) == 3
    if fill_values is not None and len(fill_values) > 1:
        # One fill value per channel when more than one is given.
        assert input_shape[2] == len(fill_values)
    eii = RandomDataIterator(batch_size, shape=input_shape)
    crop_y, crop_x = 0.5, 0.5
    # Crop window deliberately exceeds the input so the policy must kick in.
    crop_h, crop_w = input_shape[0] * 2, input_shape[1] * 2
    pipe = CMNRandomDataPipeline(cmn_op, device, batch_size, input_layout, iter(eii), dtype=dtype,
                                 output_layout=output_layout, mirror_probability=mirror_probability,
                                 mean=mean, std=std, pad_output=should_pad, crop_w=crop_w,
                                 crop_h=crop_h, crop_pos_x=crop_x, crop_pos_y=crop_y,
                                 out_of_bounds_policy=out_of_bounds_policy, fill_values=fill_values,
                                 extra_outputs=True)
    # Permutation mapping input-layout dims to output-layout dims, passed to
    # the slice checker; None means layouts match.
    permute = None
    if output_layout != input_layout:
        permute = []
        for d in range(len(input_layout)):
            perm_d = input_layout.find(output_layout[d])
            permute.append(perm_d)
    if fill_values is None:
        fill_values = 0
    pipe.build()
    for k in range(3):
        outs = pipe.run()
        out = outs[0]
        in_data = outs[1]
        mirror_data = outs[2]
        if isinstance(out, dali.backend_impl.TensorListGPU):
            out = out.as_cpu()
        if isinstance(in_data, dali.backend_impl.TensorListGPU):
            in_data = in_data.as_cpu()
        assert batch_size == len(out)
        for idx in range(batch_size):
            sample_in = in_data.at(idx)
            sample_out = out.at(idx)
            mirror = mirror_data.at(idx)
            # Flip flags per dimension: never along H, per-sample along W.
            flip = [0, mirror]
            in_shape = list(sample_in.shape)
            crop_anchor_norm = [crop_y, crop_x]
            crop_shape = [crop_h, crop_w]
            # Normalized anchor -> absolute anchor (negative for oversized crops).
            # NOTE(review): the comprehension variable `k` shadows the outer
            # iteration counter; harmless, but confusing.
            crop_anchor_abs = [crop_anchor_norm[k] * (input_shape[k] - crop_shape[k]) for k in
                               range(2)]
            abs_start, abs_end, abs_slice_shape = abs_slice_start_and_end(in_shape[:2],
                                                                          crop_anchor_abs,
                                                                          crop_shape, False, False)
            check_slice_output(sample_in, sample_out, crop_anchor_abs, abs_slice_shape, abs_start,
                               abs_end, out_of_bounds_policy, fill_values, mean=mean, std=std,
                               flip=flip, permute=permute)
def test_cmn_with_out_of_bounds_policy_support():
    """Exercise the 'pad' and 'trim_to_shape' policies on oversized crop windows."""
    in_shape = (40, 80, 3)
    in_layout = 'HWC'
    dtype = types.FLOAT
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
    fill_values = (0x76, 0xb0, 0x00)
    mirror_probability = 0.5
    for out_of_bounds_policy in ['pad', 'trim_to_shape']:
        for cmn_op, device in op_dev_pairs:
            for batch_size in [1, 3]:
                for out_layout in ['HWC', 'CHW']:
                    for should_pad in [False, True]:
                        yield check_cmn_with_out_of_bounds_policy_support, cmn_op, device, \
                            batch_size, dtype, in_layout, in_shape, out_layout, \
                            mirror_probability, mean, std, should_pad, out_of_bounds_policy, \
                            fill_values
def check_cmn_with_out_of_bounds_error(cmn_op, device, batch_size, input_shape=(100, 200, 3)):
    """Build and run a CMN pipeline whose crop window exceeds the input with
    out_of_bounds_policy='error'. The run is expected to raise; callers wrap
    this function with ``raises``.
    """
    # Written with the HWC layout in mind and "HW" axes in slice arguments.
    layout = "HWC"
    assert len(input_shape) == 3
    data_iter = RandomDataIterator(batch_size, shape=input_shape)
    anchor_y = anchor_x = 0.5
    oversized_h, oversized_w = 2 * input_shape[0], 2 * input_shape[1]
    pipe = CMNRandomDataPipeline(cmn_op, device, batch_size, layout, iter(data_iter),
                                 dtype=types.FLOAT, output_layout=layout,
                                 mirror_probability=0.5, mean=[127.], std=[127.],
                                 pad_output=True, crop_w=oversized_w, crop_h=oversized_h,
                                 crop_pos_x=anchor_x, crop_pos_y=anchor_y,
                                 out_of_bounds_policy="error")
    pipe.build()
    pipe.run()
def test_slice_with_out_of_bounds_error():
    """Out-of-bounds crop with policy='error' must raise.

    NOTE(review): despite the name this exercises CropMirrorNormalize, not
    Slice — the name appears copy-pasted from the slice tests; consider
    renaming to test_cmn_with_out_of_bounds_error.
    """
    in_shape = (40, 80, 3)
    for cmn_op, device in op_dev_pairs:
        for batch_size in [1, 3]:
            yield raises(RuntimeError, "Slice can't be placed out of bounds with current policy.")(
                check_cmn_with_out_of_bounds_error), cmn_op, device, batch_size, in_shape
def check_cmn_per_sample_norm_args(cmn_fn, device, rand_mean, rand_stdev, scale, shift):
    """Verify CMN with per-sample (tensor) mean/std arguments against numpy."""
    @pipeline_def(num_threads=3, device_id=0)
    def make_pipe():
        image_like = fn.random.uniform(device=device, range=(0, 255), shape=(80, 120, 3))
        image_like = fn.reshape(image_like, layout="HWC")
        mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
        std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
        if rand_mean:
            mean = fn.random.uniform(range=(100, 125), shape=(3,))
        if rand_stdev:
            std = fn.random.uniform(range=(55, 60), shape=(3,))
        normalized = cmn_fn(image_like, dtype=types.FLOAT, output_layout="HWC",
                            mean=mean, std=std, scale=scale, shift=shift,
                            pad_output=False)
        return normalized, image_like, mean, std

    batch_size = 10
    pipe = make_pipe(batch_size=batch_size)
    pipe.build()
    for _ in range(3):
        outputs = pipe.run()
        for sample_idx in range(batch_size):
            def to_numpy(batch):
                # GPU outputs need an explicit copy back to host first.
                if isinstance(batch, dali.backend_impl.TensorListGPU):
                    return np.array(batch[sample_idx].as_cpu())
                return np.array(batch[sample_idx])
            out, image_like, mean, std = (to_numpy(o) for o in outputs)
            ref_scale = scale or 1.0
            ref_shift = shift or 0.0
            expected = ref_scale * (image_like - mean) / std + ref_shift
            np.testing.assert_allclose(out, expected, atol=ref_scale * 1e-6)
def test_per_sample_norm_args():
    """Tensor-valued mean/std with and without scale/shift, on both backends."""
    mean_std_cases = [(True, True), (True, False), (False, True)]
    scale_shift_cases = [(None, None), (255.0, -128.0)]
    for cmn_fn, device in fn_dev_pairs:
        for random_mean, random_std in mean_std_cases:
            for scale, shift in scale_shift_cases:
                yield check_cmn_per_sample_norm_args, \
                    cmn_fn, device, random_mean, random_std, scale, shift
def check_crop_mirror_normalize_wrong_layout(cmn_fn, device, batch_size, input_shape=(100, 200, 3),
                                             layout="ABC"):
    """CMN must reject inputs whose layout is not one of the supported ones."""
    assert len(layout) == len(input_shape)

    @pipeline_def
    def make_pipe():
        def feed():
            return [np.zeros(input_shape, dtype=np.uint8) for _ in range(batch_size)]
        data = fn.external_source(source=feed, layout=layout, device=device)
        return cmn_fn(data, crop_h=10, crop_w=10)

    pipe = make_pipe(batch_size=batch_size, device_id=0, num_threads=3)
    pipe.build()
    expected_msg = f'The layout "{layout}" does not match any of the allowed layouts'
    with assert_raises(RuntimeError, glob=expected_msg):
        pipe.run()
def test_crop_mirror_normalize_wrong_layout():
    """Feed an unsupported layout through each fn-API device."""
    shape = (40, 80, 3)
    n_samples = 3
    for (cmn_fn, device), layout in itertools.product(fn_dev_pairs, ['ABC']):
        yield check_crop_mirror_normalize_wrong_layout, \
            cmn_fn, device, n_samples, shape, layout
def check_crop_mirror_normalize_empty_layout(cmn_fn, device, batch_size, input_shape=(100, 200, 3)):
    """Without an input layout, CMN crops HxW and produces CHW output by default."""
    @pipeline_def
    def make_pipe():
        def feed():
            return [np.zeros(input_shape, dtype=np.uint8) for _ in range(batch_size)]
        data = fn.external_source(source=feed, device=device)
        return cmn_fn(data, crop_h=10, crop_w=20)

    pipe = make_pipe(batch_size=batch_size, device_id=0, num_threads=3)
    pipe.build()
    out_batch, = pipe.run()
    expected_shape = (3, 10, 20)  # CHW by default
    for sample_idx in range(batch_size):
        assert as_array(out_batch[sample_idx]).shape == expected_shape
def test_crop_mirror_normalize_empty_layout():
    """Run the empty-layout check for both fn-API devices."""
    shape = (40, 80, 3)
    n_samples = 3
    for cmn_fn, device in fn_dev_pairs:
        yield check_crop_mirror_normalize_empty_layout, cmn_fn, device, n_samples, shape
# Parameter space for test_cmn_optimized_vs_cpu (combined via itertools.product).
batch_sizes = [1, 4]
# Widths clustered around 31/32/33, 127/128/129 and 24*128±1 — presumably chosen
# to hit boundary conditions of the optimized GPU kernel (TODO confirm).
shapes = [(1, 1, 3), (1, 10, 3), (1, 31, 3), (1, 32, 3), (1, 33, 3), (1, 127, 3), (1, 128, 3),
          (1, 129, 3), (1, 24 * 128 - 1, 3), (1, 24 * 128, 3), (1, 24 * 128 + 1, 3),
          (8, 24 * 128 - 1, 3), (8, 24 * 128, 3), (8, 24 * 128 + 1, 3), (1024, 1024, 3),
          (999, 999, 3)]
dtypes = [types.FLOAT, types.FLOAT16]
pads = [False, True]
mirrors = [False, True]
# (crop_h, crop_w) as fractions of the input extents; (None, None) = no crop.
crops = [(1.0, 0.25), (0.25, 0.25), (0.25, 1.0), (0.5, 0.75), (None, None)]
layouts = ["HWC", "CHW"]
@params(*itertools.product(batch_sizes, shapes, dtypes, pads, mirrors, crops, layouts))
def test_cmn_optimized_vs_cpu(batch_size, shape, dtype, pad, mirror, crops, layout):
    """Compare the GPU CMN implementation against the CPU baseline on ramp data."""
    @pipeline_def(batch_size=batch_size, device_id=0, num_threads=4)
    def make_pipe(device):
        def feed():
            # Deterministic ramp input, one sample per batch slot.
            return [np.arange(np.prod(shape), dtype=np.uint8).reshape(shape)
                    for _ in range(batch_size)]
        data = fn.external_source(source=feed)
        crop_frac_h, crop_frac_w = crops
        crop_h = int(crop_frac_h * shape[0]) if crop_frac_h else None
        crop_w = int(crop_frac_w * shape[1]) if crop_frac_w else None
        if device == "gpu":
            data = data.gpu()
        return fn.crop_mirror_normalize(data, device=device, dtype=dtype, pad_output=pad,
                                        mirror=mirror, crop_h=crop_h, crop_w=crop_w,
                                        mean=[0.1, 0.2, 0.3],
                                        fill_values=[0.0, 0.0, 0.0, 42.0] if pad else None,
                                        output_layout=layout)
    cpu_baseline = make_pipe("cpu")
    gpu_tested = make_pipe("gpu")
    compare_pipelines(cpu_baseline, gpu_tested, batch_size, 3)
|
DALI-main
|
dali/test/python/operator_1/test_crop_mirror_normalize.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali as dali
import nvidia.dali.fn as fn
import numpy as np
import math
from test_utils import check_batch
# Seed the global numpy RNG so the randomly generated inputs are reproducible.
np.random.seed(1234)
def input_generator(num_inputs, batch_size, ndim, variable_axis=None):
    """Create a callable that produces `num_inputs` random float32 batches.

    Each call returns a list of `num_inputs` batches of `batch_size` samples
    of rank `ndim` (plain float32 scalars for ndim == 0). Samples at the same
    batch index share their base shape across inputs, except along
    `variable_axis` (if given), which is re-drawn per input so joins over
    unequal extents are exercised.
    """
    if ndim <= 0:
        max_extent = 1
    else:
        # Budget roughly 1e6 elements in total across all inputs of one batch.
        max_extent = int(math.ceil(math.pow(1e+6 / (batch_size * num_inputs), 1 / ndim)))
    # np.random.randint requires high > low; a very large batch_size/num_inputs
    # could drive max_extent down to 1 and raise ValueError, so clamp it.
    max_extent = max(max_extent, 2)

    def gen():
        if ndim == 0:
            # Scalar case: one random float per sample.
            return [[np.float32(np.random.random()) for _ in range(batch_size)]
                    for _ in range(num_inputs)]
        inputs = [[] for _ in range(num_inputs)]
        for _ in range(batch_size):
            base_shape = np.random.randint(1, max_extent, [ndim])
            for j in range(num_inputs):
                shape = list(base_shape)
                if variable_axis is not None:
                    shape[variable_axis] = np.random.randint(1, 10)
                inputs[j].append(np.random.random(shape).astype(np.float32))
        return inputs
    return gen
def test_cat_different_length():
    """Concatenation along axis 1 of matrices with different widths."""
    pipe = dali.pipeline.Pipeline(batch_size=1, num_threads=3, device_id=0)
    left = np.array([[1, 2, 3, 4],
                     [5, 6, 7, 8],
                     [9, 10, 11, 12]])
    right = np.array([[13, 14, 15],
                      [16, 17, 18],
                      [19, 20, 21]])
    with pipe:
        src1 = dali.types.Constant(left)
        src2 = dali.types.Constant(right)
        pipe.set_outputs(fn.cat(src1, src2, axis=1),
                         fn.cat(src1.gpu(), src2.gpu(), axis=1))
    pipe.build()
    out_cpu, out_gpu = pipe.run()
    out_gpu = out_gpu.as_cpu()
    expected = np.concatenate([left, right], axis=1)
    assert np.array_equal(out_cpu.at(0), expected)
    assert np.array_equal(out_gpu.at(0), expected)
def test_cat_empty_input():
    """An input with zero extent along the concatenation axis is a no-op."""
    pipe = dali.pipeline.Pipeline(batch_size=1, num_threads=3, device_id=0)
    left = np.array([[1, 2, 3, 4],
                     [5, 6, 7, 8],
                     [9, 10, 11, 12]])
    empty = np.array([[], [], []], dtype=np.int32)
    right = np.array([[13, 14, 15],
                      [16, 17, 18],
                      [19, 20, 21]])
    with pipe:
        src1 = dali.types.Constant(left)
        src2 = dali.types.Constant(empty)
        src3 = dali.types.Constant(right)
        pipe.set_outputs(fn.cat(src1, src2, src3, axis=1),
                         fn.cat(src1.gpu(), src2.gpu(), src3.gpu(), axis=1))
    pipe.build()
    out_cpu, out_gpu = pipe.run()
    out_gpu = out_gpu.as_cpu()
    expected = np.array([[1, 2, 3, 4, 13, 14, 15],
                         [5, 6, 7, 8, 16, 17, 18],
                         [9, 10, 11, 12, 19, 20, 21]])
    assert np.array_equal(out_cpu.at(0), expected)
    assert np.array_equal(out_gpu.at(0), expected)
def test_cat_all_empty():
    """Concatenating only empty inputs yields an empty result of the same shape."""
    pipe = dali.pipeline.Pipeline(batch_size=1, num_threads=3, device_id=0)
    with pipe:
        empty = dali.types.Constant(np.array([[], [], []], dtype=np.int32))
        pipe.set_outputs(fn.cat(empty, empty, empty, axis=1),
                         fn.cat(empty.gpu(), empty.gpu(), empty.gpu(), axis=1))
    pipe.build()
    out_cpu, out_gpu = pipe.run()
    out_gpu = out_gpu.as_cpu()
    expected = np.array([[], [], []], dtype=np.int32)
    assert np.array_equal(out_cpu.at(0), expected)
    assert np.array_equal(out_gpu.at(0), expected)
def ref_cat(input_batches, axis):
    """Reference: concatenate corresponding samples from each input batch."""
    num_samples = len(input_batches[0])
    return [np.concatenate([batch[i] for batch in input_batches], axis=axis)
            for i in range(num_samples)]
def ref_stack(input_batches, axis):
    """Reference: stack corresponding samples from each input batch on a new axis."""
    num_samples = len(input_batches[0])
    return [np.stack([batch[i] for batch in input_batches], axis=axis)
            for i in range(num_samples)]
def _run_test_cat(num_inputs, layout, ndim, axis, axis_name):
    """Concatenate `num_inputs` external sources on CPU and GPU and compare
    both against the numpy reference.

    `axis_name` (when given) takes precedence over `axis`, mirroring the
    operator's argument resolution.
    """
    num_iter = 3
    batch_size = 4
    if ndim is None:
        ndim = len(layout)
    # Resolve the axis the same way the operator does: by name first, then index.
    ref_axis = layout.find(axis_name) if axis_name is not None else axis if axis is not None else 0
    assert ref_axis >= -ndim and ref_axis < ndim
    axis_arg = None if axis_name else axis
    pipe = dali.pipeline.Pipeline(batch_size=batch_size, num_threads=3, device_id=0)
    with pipe:
        inputs = fn.external_source(
            input_generator(num_inputs, batch_size, ndim, ref_axis),
            num_outputs=num_inputs, layout=layout)
        out_cpu = fn.cat(*inputs, axis=axis_arg, axis_name=axis_name)
        out_gpu = fn.cat(*(x.gpu() for x in inputs), axis=axis_arg, axis_name=axis_name)
        pipe.set_outputs(out_cpu, out_gpu, *inputs)
    pipe.build()
    # Loop variable renamed to `_`: the original shadowed the builtin `iter`
    # (and the sibling _run_test_stack already uses `_`).
    for _ in range(num_iter):
        o_cpu, o_gpu, *inputs = pipe.run()
        ref = ref_cat(inputs, ref_axis)
        check_batch(o_cpu, ref, batch_size, eps=0, expected_layout=layout)
        check_batch(o_gpu, ref, batch_size, eps=0, expected_layout=layout)
def _run_test_stack(num_inputs, layout, ndim, axis, axis_name):
    """Stack `num_inputs` external sources on CPU and GPU and compare both
    against the numpy reference, including the resulting layout string.
    """
    num_iter = 3
    batch_size = 4
    if ndim is None:
        ndim = len(layout)
    ref_axis = 0 if axis is None else axis
    assert -ndim <= ref_axis <= ndim
    if axis_name:
        # The new axis name is inserted into the layout at the stacking position.
        insert_at = axis + ndim + 1 if axis < 0 else axis
        ref_layout = (layout[:insert_at] + axis_name + layout[insert_at:]) if layout else axis_name
    else:
        ref_layout = ""
    pipe = dali.pipeline.Pipeline(batch_size=batch_size, num_threads=3, device_id=0)
    with pipe:
        inputs = fn.external_source(
            input_generator(num_inputs, batch_size, ndim),
            num_outputs=num_inputs, layout=layout)
        stacked_cpu = fn.stack(*inputs, axis=axis, axis_name=axis_name)
        stacked_gpu = fn.stack(*(inp.gpu() for inp in inputs), axis=axis, axis_name=axis_name)
        pipe.set_outputs(stacked_cpu, stacked_gpu, *inputs)
    pipe.build()
    for _ in range(num_iter):
        out_cpu, out_gpu, *in_batches = pipe.run()
        ref = ref_stack(in_batches, ref_axis)
        check_batch(out_cpu, ref, batch_size, eps=0, expected_layout=ref_layout)
        check_batch(out_gpu, ref, batch_size, eps=0, expected_layout=ref_layout)
def test_cat():
    """Exercise fn.cat over various ranks, layouts and (possibly negative) axes."""
    layout_ndim_cases = [
        (None, 0), (None, 1), ('A', 1), (None, 2), ('HW', 2), (None, 3), ('DHW', 3)
    ]
    for num_inputs in [1, 2, 3, 100]:
        for layout, ndim in layout_ndim_cases:
            for axis in range(-ndim, ndim):
                # When a layout is present, also address the axis by its name.
                axis_name = layout[axis] if layout else None
                yield _run_test_cat, num_inputs, layout, ndim, axis, axis_name
def test_stack():
    """Exercise fn.stack over ranks/layouts, including the one-past-end axis."""
    layout_ndim_cases = [
        (None, 0), (None, 1), ('A', 1), (None, 2), ('HW', 2), (None, 3), ('DHW', 3)
    ]
    for num_inputs in [1, 2, 3, 100]:
        for layout, ndim in layout_ndim_cases:
            for axis in range(-ndim, ndim + 1):
                # Naming the new axis requires a layout (or a scalar input).
                axis_names = [None] if layout is None and ndim > 0 else [None, 'C']
                for axis_name in axis_names:
                    yield _run_test_stack, num_inputs, layout, ndim, axis, axis_name
|
DALI-main
|
dali/test/python/operator_1/test_join.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import numpy as np
from functools import partial
from test_utils import compare_pipelines
from test_utils import RandomDataIterator
class ErasePipeline(Pipeline):
    """Pipeline applying ops.Erase to externally-fed data.

    ``fill_value`` may be a constant (passed as an operator argument) or a
    RandomDataIterator, in which case per-sample fill values are fed through
    a second external source and passed as a named input at run time.
    """
    def __init__(self, device, batch_size, layout, iterator,
                 anchor, shape, axis_names, axes, fill_value,
                 normalized_anchor=False, normalized_shape=False,
                 num_threads=1, device_id=0, num_gpus=1):
        super(ErasePipeline, self).__init__(batch_size, num_threads, device_id)
        self.device = device
        self.layout = layout
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        if isinstance(fill_value, RandomDataIterator):
            # Per-sample fill values: fed at run time, not fixed in the op.
            self.fill_value_iterator = fill_value
            self.fill_value_inputs = ops.ExternalSource()
            fill_value = None
        else:
            self.fill_value_iterator = None
        self.erase = ops.Erase(device=self.device,
                               anchor=anchor,
                               shape=shape,
                               axis_names=axis_names,
                               axes=axes,
                               fill_value=fill_value,
                               normalized_anchor=normalized_anchor,
                               normalized_shape=normalized_shape)

    def define_graph(self):
        """external source -> (optional copy to GPU) -> Erase."""
        self.data = self.inputs()
        random_data = self.data.gpu() if self.device == 'gpu' else self.data
        if self.fill_value_iterator is not None:
            self.fill_value_data = self.fill_value_inputs()
            out = self.erase(random_data, fill_value=self.fill_value_data)
        else:
            out = self.erase(random_data)
        return out

    def iter_setup(self):
        # Feed the data (and, when applicable, per-sample fill values).
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
        if self.fill_value_iterator is not None:
            fill_value_data = self.fill_value_iterator.next()
            self.feed_input(self.fill_value_data, fill_value_data)
def get_axes(layout, axis_names):
    """Map each axis name in *axis_names* to its index within *layout*."""
    positions = [layout.find(name) for name in axis_names]
    # Every requested axis must exist in the layout string.
    assert all(pos >= 0 for pos in positions)
    return positions
def get_regions(in_shape, axes, arg_anchor, arg_shape):
    """Compute per-region (start, end) coordinates of erase ROIs.

    *arg_anchor* / *arg_shape* hold consecutive groups of one value per entry
    in *axes*; dimensions not listed in *axes* span the full input extent.
    Returns a tuple (starts, ends), each a list of per-region coordinate lists.
    """
    assert len(arg_shape) % len(axes) == 0
    region_len = len(axes)
    nregions = len(arg_shape) // region_len
    starts, ends = [], []
    for region in range(nregions):
        begin = [0] * len(in_shape)
        finish = list(in_shape)
        base = region * region_len
        for k, axis in enumerate(axes):
            begin[axis] = arg_anchor[base + k]
            finish[axis] = arg_anchor[base + k] + arg_shape[base + k]
        starts.append(begin)
        ends.append(finish)
    return (starts, ends)
def erase_func(anchor, shape, axis_names, axes, layout, fill_value, image):
    """Reference (numpy) implementation of the Erase operator.

    Fills each region described by (anchor, shape) along the given axes with
    ``fill_value`` and returns the modified image. Regions are resolved either
    from explicit ``axes`` indices or from ``axis_names`` looked up in ``layout``.
    """
    assert len(anchor) == len(shape)
    if not axes:
        axes = get_axes(layout, axis_names)
    if fill_value is None:
        fill_value = 0  # default fill is zero
    roi_starts, roi_ends = get_regions(image.shape, axes, anchor, shape)
    assert len(roi_starts) == len(roi_ends)
    for start, end in zip(roi_starts, roi_ends):
        assert len(start) == len(end)
        # Generalized over rank: build one slice per dimension instead of
        # special-casing only 3D and 4D inputs (previously other ranks failed).
        region = tuple(slice(b, e) for b, e in zip(start, end))
        image[region] = fill_value
    return image
class ErasePythonPipeline(Pipeline):
    """Reference pipeline computing Erase via a python function (numpy).

    Runs with exec_async/exec_pipelined disabled, as required by
    ``ops.PythonFunction``.
    """
    def __init__(self, function, batch_size, data_layout, iterator,
                 anchor, shape, axis_names, axes, fill_value,
                 erase_func=erase_func,
                 num_threads=1, device_id=0):
        # NOTE(review): the `function` parameter is unconditionally overwritten
        # below, so whatever the caller passes here is ignored — verify intent.
        super(ErasePythonPipeline, self).__init__(batch_size,
                                                  num_threads,
                                                  device_id,
                                                  exec_async=False,
                                                  exec_pipelined=False)
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.data_layout = data_layout
        if isinstance(fill_value, RandomDataIterator):
            # Per-sample fill values are fed through an extra ExternalSource;
            # the python function then receives them as its first argument.
            self.fill_value_iterator = fill_value
            self.fill_value_inputs = ops.ExternalSource()
            fill_value = None
            function = partial(erase_func, anchor, shape, axis_names, axes, data_layout)
        else:
            self.fill_value_iterator = None
            function = partial(erase_func, anchor, shape, axis_names, axes, data_layout, fill_value)
        self.erase = ops.PythonFunction(function=function, output_layouts=data_layout)
    def define_graph(self):
        # When fill values are per-sample, they precede the image input.
        self.data = self.inputs()
        if self.fill_value_iterator is not None:
            self.fill_value_data = self.fill_value_inputs()
            out = self.erase(self.fill_value_data, self.data)
        else:
            out = self.erase(self.data)
        return out
    def iter_setup(self):
        # Feed the next batch (and fill values, if per-sample) each iteration.
        data = self.iterator.next()
        self.feed_input(self.data, data)
        if self.fill_value_iterator is not None:
            fill_value_data = self.fill_value_iterator.next()
            self.feed_input(self.fill_value_data, fill_value_data)
def check_operator_erase_vs_python(device, batch_size, input_shape,
                                   anchor, shape, axis_names, axes, input_layout, fill_value):
    """Compare the native Erase operator against the python reference pipeline."""
    data_iter_a = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    data_iter_b = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    if fill_value == 'random':
        # Per-sample fill values: one value per channel (the last extent).
        fill_arg_a = iter(RandomDataIterator(batch_size, shape=input_shape[-1:],
                                             dtype=np.float32))
        fill_arg_b = iter(RandomDataIterator(batch_size, shape=input_shape[-1:],
                                             dtype=np.float32))
    else:
        fill_arg_a = fill_value
        fill_arg_b = fill_value
    compare_pipelines(
        ErasePipeline(device, batch_size, input_layout, iter(data_iter_a), anchor=anchor,
                      shape=shape, axis_names=axis_names, axes=axes,
                      fill_value=fill_arg_a),
        ErasePythonPipeline(device, batch_size, input_layout, iter(data_iter_b), anchor=anchor,
                            shape=shape, axis_names=axis_names, axes=axes,
                            fill_value=fill_arg_b),
        batch_size=batch_size, N_iterations=3, eps=1e-04, expected_layout=input_layout)
def test_operator_erase_vs_python():
    """Generate Erase-vs-python-reference cases for several ROI configurations."""
    # Each case: layout, input shape, axis_names, axes, anchor, shape, fill_value
    rois = [("HWC", (60, 80, 3), "HW", None, (4, 10), (40, 50), 0),
            ("HWC", (60, 80, 3), "HW", None, (4, 10), (40, 50), None),
            ("HWC", (60, 80, 3), "HW", None, (4, 2, 3, 4), (50, 10, 10, 50), -1),
            ("HWC", (60, 80, 3), "HW", None, (4, 2, 3, 4), (50, 10, 10, 50), (118, 185, 0)),
            ("HWC", (60, 80, 3), "HW", None, (4, 2, 3, 4), (50, 10, 10, 50), "random"),
            ("HWC", (60, 80, 3), "H", None, (4,), (7,), 0),
            ("HWC", (60, 80, 3), "H", None, (4, 15), (7, 8), 0),
            ("HWC", (60, 80, 3), "W", None, (4,), (7,), 0),
            ("HWC", (60, 80, 3), "W", None, (4, 15), (7, 8), 0),
            ("HWC", (60, 80, 3), "W", None, (4, 15), (7, 8), "random"),
            ("HWC", (60, 80, 3), None, (0, 1), (4, 10), (40, 50), 0),
            ("HWC", (60, 80, 3), None, (0, 1), (4, 2, 3, 4), (50, 10, 10, 50), 0),
            ("HWC", (60, 80, 3), None, (0,), (4,), (7,), 0),
            ("HWC", (60, 80, 3), None, (0,), (4, 15), (7, 8), 0),
            ("HWC", (60, 80, 3), None, (1,), (4,), (7,), 0),
            ("HWC", (60, 80, 3), None, (1,), (4, 15), (7, 8), 0),
            ("HWC", (60, 80, 3), None, (1,), (4, 15), (7, 8), "random"),
            ("DHWC", (10, 60, 80, 3), "DHW", None, (2, 4, 15), (3, 7, 8), 0),
            ("HWC", (60, 80, 1), "HW", None, (4, 15), (7, 8), 0),
            ("XYZ", (60, 80, 3), "XY", None, (4, 10), (40, 50), -1), ]

    def validate(case):
        # Sanity-check the case description before yielding it.
        input_layout, input_shape, axis_names, axes, anchor, shape, fill_value = case
        assert len(input_layout) == len(input_shape)
        assert len(anchor) == len(shape)
        if axis_names:
            assert axes is None
            assert len(anchor) % len(axis_names) == 0
        else:
            assert len(axes) > 0
            assert len(anchor) % len(axes) == 0

    for device in ['cpu']:
        for batch_size in (1, 8):
            for case in rois:
                validate(case)
                input_layout, input_shape, axis_names, axes, anchor, shape, fill_value = case
                yield check_operator_erase_vs_python, device, batch_size, input_shape, \
                    anchor, shape, axis_names, axes, input_layout, fill_value
def check_operator_erase_with_normalized_coords(device, batch_size, input_shape,
                                                anchor, shape, axis_names, input_layout,
                                                anchor_norm, shape_norm, normalized_anchor,
                                                normalized_shape):
    """Erase with normalized ROI coordinates must match the absolute-coordinate run."""
    data_a = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    data_b = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    normalized_pipe = ErasePipeline(device, batch_size, input_layout, iter(data_a),
                                    anchor=anchor_norm, shape=shape_norm,
                                    normalized_anchor=normalized_anchor,
                                    normalized_shape=normalized_shape,
                                    axis_names=axis_names, axes=None, fill_value=0)
    absolute_pipe = ErasePipeline(device, batch_size, input_layout, iter(data_b),
                                  anchor=anchor, shape=shape, axis_names=axis_names,
                                  axes=None, fill_value=0)
    compare_pipelines(normalized_pipe, absolute_pipe,
                      batch_size=batch_size, N_iterations=3, eps=1e-04)
def test_operator_erase_with_normalized_coords():
    """Generate normalized-vs-absolute Erase comparison cases."""
    # Each case: layout, input shape, axis_names, anchor, shape, anchor_norm,
    # shape_norm, fill_value.
    # NOTE(review): the trailing fill_value is unpacked below but never
    # forwarded — the checker always runs with fill_value=0. Confirm intent.
    rois = [
        ("HWC", (60, 80, 3), "HW", (4, 10), (40, 50),
         (4 / 60.0, 10 / 80.0), (40 / 60.0, 50 / 80.0), 0),
        ("HWC", (60, 80, 3), "HW", (4, 10), (40, 50),
         (4 / 60.0, 10 / 80.0), (40 / 60.0, 50 / 80.0), -1),
        ("HWC", (60, 80, 3), "HW", (4, 10), (40, 50),
         (4 / 60.0, 10 / 80.0), (40 / 60.0, 50 / 80.0), (118, 186, 0)),
        ("HWC", (60, 80, 3), "HW", (4, 2, 3, 4), (50, 10, 10, 50),
         (4 / 60.0, 2 / 80.0, 3 / 60.0, 4 / 80.0), (50 / 60.0, 10 / 80.0, 10 / 60.0, 50 / 80.0), 0),
        ("HWC", (60, 80, 3), "H", (4, ), (7, ), (4 / 60.0, ), (7 / 60.0, ), 0),
        ("DHWC", (10, 60, 80, 3), "DHW", (2, 4, 10), (5, 40, 50), (2 / 10.0, 4 / 60.0, 10 / 80.0),
         (5 / 10.0, 40 / 60.0, 50 / 80.0), 0),
        ("HWC", (60, 80, 1), "WH", (10, 4), (50, 40),
         (10 / 80.0, 4 / 60.0), (50 / 80.0, 40 / 60.0), 0),
        ("XYZ", (60, 80, 3), "X", (4, ), (7, ), (4 / 60.0, ), (7 / 60.0, ), -10),
    ]
    for device in ('cpu', 'gpu'):
        for batch_size in (1, 8):
            for case in rois:
                (input_layout, input_shape, axis_names, anchor, shape,
                 anchor_norm, shape_norm, fill_value) = case
                assert len(input_layout) == len(input_shape)
                assert len(anchor) == len(shape)
                assert len(anchor) % len(axis_names) == 0
                for normalized_anchor, normalized_shape in (
                        (True, True), (True, False), (False, True)):
                    anchor_arg = anchor_norm if normalized_anchor else anchor
                    shape_arg = shape_norm if normalized_shape else shape
                    yield check_operator_erase_with_normalized_coords, device, batch_size, \
                        input_shape, anchor, shape, axis_names, input_layout, \
                        anchor_arg, shape_arg, normalized_anchor, normalized_shape
def test_operator_erase_with_out_of_bounds_roi_coords():
    """A region fully out of bounds should be a no-op relative to the absolute run."""
    device = 'cpu'
    batch_size = 8
    input_layout = "HWC"
    input_shape = (60, 80, 3)
    axis_names = "HW"
    # Absolute coordinates describe only the two in-bounds regions.
    anchor_arg = (4, 10, 10, 4)
    shape_arg = (40, 50, 50, 40)
    # Normalized coordinates insert a middle region that is completely out of bounds.
    anchor_norm_arg = (4 / 60.0, 10 / 80.0, 2000, 2000, 10 / 60.0, 4 / 80.0)
    shape_norm_arg = (40 / 60.0, 50 / 80.0, 200, 200, 50 / 60.0, 40 / 80.0)
    yield (check_operator_erase_with_normalized_coords,
           device, batch_size, input_shape, anchor_arg, shape_arg,
           axis_names, input_layout, anchor_norm_arg, shape_norm_arg, True, True)
|
DALI-main
|
dali/test/python/operator_1/test_erase.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import warnings
import numpy as np
from scipy.spatial.transform import Rotation as scipy_rotate
from nvidia.dali.pipeline import Pipeline
# Just to verify that import works as expected
import nvidia.dali.ops.transforms as _unused_import # noqa:F401
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
from nvidia.dali import pipeline_def
from sequences_test_utils import ArgData, ArgDesc, sequence_suite_helper, ArgCb, ParamsProvider
from nose_utils import assert_raises
def check_results_sample(T1, mat_ref, T0=None, reverse=False, atol=1e-6):
    """Check a single sample's affine matrix against the reference.

    If ``T0`` (an input ndim x (ndim+1) affine matrix) is given, the expected
    result is the composition of ``mat_ref`` with ``T0`` — in either order,
    depending on ``reverse``. Otherwise ``mat_ref`` itself is expected.

    Raises AssertionError when ``T1`` differs from the reference by more than
    ``atol``.
    """
    ndim = mat_ref.shape[0] - 1
    if T0 is not None:
        # Promote T0 to a full homogeneous (ndim+1) x (ndim+1) matrix.
        mat_T0 = np.identity(ndim + 1)
        mat_T0[:ndim, :] = T0
        if reverse:
            mat_T1 = np.dot(mat_T0, mat_ref)
        else:
            mat_T1 = np.dot(mat_ref, mat_T0)
        ref_T1 = mat_T1[:ndim, :]
    else:
        ref_T1 = mat_ref[:ndim, :]
    # Bug fix: honor the caller-provided tolerance; previously `atol=1e-6`
    # was hardcoded here, silently ignoring the parameter.
    assert np.allclose(T1, ref_T1, atol=atol)
def check_results(T1, batch_size, mat_ref, T0=None, reverse=False, atol=1e-6):
    """Run the per-sample matrix check over a whole batch of outputs."""
    for sample_idx in range(batch_size):
        input_mat = None if T0 is None else T0.at(sample_idx)
        check_results_sample(T1.at(sample_idx), mat_ref, input_mat, reverse, atol)
def translate_affine_mat(offset):
    """Return the homogeneous (ndim+1)x(ndim+1) matrix translating by *offset*."""
    dim = len(offset)
    mat = np.eye(dim + 1)
    mat[:-1, -1] = offset
    return mat
def check_transform_translation_op(offset, has_input=False, reverse_order=False, batch_size=1,
                                   num_threads=4, device_id=0):
    """Run fn.transforms.translation and validate against translate_affine_mat."""
    ndim = len(offset)
    pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed=1234)
    with pipe:
        if has_input:
            T0 = fn.random.uniform(range=(-1, 1), shape=(ndim, ndim + 1))
            T1 = fn.transforms.translation(T0, device='cpu', offset=offset,
                                           reverse_order=reverse_order)
            pipe.set_outputs(T1, T0)
        else:
            pipe.set_outputs(fn.transforms.translation(device='cpu', offset=offset))
    pipe.build()
    outs = pipe.run()
    ref_mat = translate_affine_mat(offset=offset)
    check_results(outs[0], batch_size, ref_mat, outs[1] if has_input else None, reverse_order)
def test_transform_translation_op(batch_size=3, num_threads=4, device_id=0):
    """Generate 2D and 3D translation cases, with and without an input transform."""
    for offset in [(0.0, 1.0), (2.0, 1.0, 3.0)]:
        for has_input in (False, True):
            reverse_opts = (False, True) if has_input else (False,)
            for reverse_order in reverse_opts:
                yield check_transform_translation_op, offset, has_input, reverse_order, \
                    batch_size, num_threads, device_id
def scale_affine_mat(scale, center=None, ndim=None):
    """Homogeneous scaling matrix; a length-1 *scale* is broadcast to all dims.

    When *center* is given, the scaling is performed about that point.
    """
    if ndim is None:
        ndim = len(scale)
    else:
        assert ndim == len(scale) or 1 == len(scale)
    assert center is None or len(center) == ndim
    factors = list(scale) * ndim if len(scale) == 1 else list(scale)
    s_mat = np.diag(factors[:ndim] + [1.0])
    if center is None:
        return s_mat
    # Conjugate with translations: shift center to origin, scale, shift back.
    to_origin = translate_affine_mat([-c for c in center])
    back = translate_affine_mat(center)
    return np.dot(back, np.dot(s_mat, to_origin))
def check_transform_scale_op(scale, center=None, has_input=False, reverse_order=False, ndim=None,
                             batch_size=1, num_threads=4, device_id=0):
    """Run fn.transforms.scale and validate against scale_affine_mat."""
    if ndim is None:
        ndim = len(scale)
    assert center is None or len(center) == ndim
    pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed=1234)
    with pipe:
        if has_input:
            T0 = fn.random.uniform(range=(-1, 1), shape=(ndim, ndim + 1))
            T1 = fn.transforms.scale(T0, device='cpu', scale=scale, center=center, ndim=ndim,
                                     reverse_order=reverse_order)
            pipe.set_outputs(T1, T0)
        else:
            pipe.set_outputs(fn.transforms.scale(device='cpu', scale=scale, center=center,
                                                 ndim=ndim))
    pipe.build()
    outs = pipe.run()
    ref_mat = scale_affine_mat(scale=scale, center=center, ndim=ndim)
    check_results(outs[0], batch_size, ref_mat, outs[1] if has_input else None, reverse_order)
def test_transform_scale_op(batch_size=3, num_threads=4, device_id=0):
    """Generate scale cases, including broadcast scale (len 1) with explicit ndim."""
    cases = [((0.0, 1.0), None, None),
             ((2.0, 1.0, 3.0), None, None),
             ((2.0, 1.0), (1.0, 0.5), None),
             ((2.0, ), (1.0, 0.5), 2)]
    for scale, center, ndim in cases:
        for has_input in (False, True):
            for reverse_order in ((False, True) if has_input else (False,)):
                yield check_transform_scale_op, scale, center, has_input, reverse_order, \
                    ndim, batch_size, num_threads, device_id,
def rotate_affine_mat(angle, axis=None, center=None):
    """Homogeneous rotation matrix: 2D when *axis* is None, else 3D about *axis*.

    *angle* is in degrees; *center*, when given, is the rotation pivot.
    """
    assert axis is None or len(axis) == 3
    ndim = 2 if axis is None else 3
    assert center is None or len(center) == ndim
    rad = angle * np.pi / 180.0
    if ndim == 2:
        cos_a, sin_a = np.cos(rad), np.sin(rad)
        r_mat = np.array(
            [[cos_a, -sin_a, 0.],
             [sin_a, cos_a, 0.],
             [0., 0., 1.]])
    else:
        # 3D: rotation about the (normalized) axis, via scipy's rotvec form.
        unit_axis = axis / np.linalg.norm(axis)
        r_mat = np.identity(ndim + 1)
        r_mat[:ndim, :ndim] = scipy_rotate.from_rotvec(rad * unit_axis).as_matrix()
    if center is None:
        return r_mat
    shift_back = translate_affine_mat(center)
    shift_to_origin = translate_affine_mat([-c for c in center])
    return np.dot(shift_back, np.dot(r_mat, shift_to_origin))
def check_transform_rotation_op(angle=None, axis=None, center=None, has_input=False,
                                reverse_order=False, batch_size=1, num_threads=4, device_id=0):
    """Run fn.transforms.rotation and validate per sample against rotate_affine_mat.

    When ``angle`` is None, a random angle is drawn inside the pipeline and
    returned as an extra output, so the reference matrix can be built per sample.
    """
    assert axis is None or len(axis) == 3
    ndim = 3 if axis is not None else 2
    assert center is None or len(center) == ndim
    random_angle = angle is None
    pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed=12345)
    with pipe:
        outputs = []
        if random_angle:
            # Per-sample random angle, also returned as an output for verification.
            angle = fn.random.uniform(range=(-90, 90))
        if has_input:
            T0 = fn.random.uniform(range=(-1, 1), shape=(ndim, ndim + 1))
            T1 = fn.transforms.rotation(T0, device='cpu', angle=angle, axis=axis, center=center,
                                        reverse_order=reverse_order)
            outputs = [T1, T0]
        else:
            T1 = fn.transforms.rotation(device='cpu', angle=angle, axis=axis, center=center)
            outputs = [T1]
        if random_angle:
            outputs.append(angle)
        pipe.set_outputs(*outputs)
    pipe.build()
    outs = pipe.run()
    # Outputs are laid out as: T1 [, T0] [, angle] — track the next index to read.
    out_idx = 1
    out_T0 = None
    out_angle = None
    if has_input:
        out_T0 = outs[out_idx]
        out_idx = out_idx + 1
    if random_angle:
        out_angle = outs[out_idx]
        out_idx = out_idx + 1
    for idx in range(batch_size):
        T0 = out_T0.at(idx) if has_input else None
        # With a random angle, read this sample's value; otherwise use the constant.
        angle = out_angle.at(idx) if random_angle else angle
        ref_mat = rotate_affine_mat(angle=angle, axis=axis, center=center)
        check_results_sample(outs[0].at(idx), ref_mat, T0, reverse_order, atol=1e-6)
def test_transform_rotation_op(batch_size=3, num_threads=4, device_id=0):
    """Generate 2D/3D rotation cases; angle=None means a random per-sample angle."""
    cases = [(None, None, None),
             (30.0, None, None),
             (None, None, (1.0, 0.5)),
             (30.0, None, (1.0, 0.5)),
             (40.0, (0.4, 0.3, 0.1), None),
             (40.0, (0.4, 0.3, 0.1), (1.0, -0.4, 10.0)),
             (None, (0.4, 0.3, 0.1), (1.0, -0.4, 10.0))]
    for angle, axis, center in cases:
        for has_input in (False, True):
            for reverse_order in ((False, True) if has_input else (False,)):
                yield check_transform_rotation_op, angle, axis, center, has_input, \
                    reverse_order, batch_size, num_threads, device_id
def shear_affine_mat(shear=None, angles=None, center=None):
    """Homogeneous shear matrix from explicit factors or shear angles (degrees).

    2D expects 2 parameters, 3D expects 6; *center*, when given, is the pivot.
    """
    assert shear is not None or angles is not None
    if isinstance(shear, (list, tuple)):
        shear = np.float32(shear)
    if isinstance(angles, (list, tuple)):
        angles = np.float32(angles)
    if shear is None:
        # The shear factor is the tangent of the shear angle.
        shear = np.tan(angles * np.pi / 180.0)
    assert shear.size in (2, 6)
    ndim = 3 if shear.size == 6 else 2
    assert center is None or len(center) == ndim
    flat = np.float32(shear).flatten()
    if ndim == 2:
        sxy, syx = flat
        s_mat = np.array(
            [[1., sxy, 0.],
             [syx, 1., 0.],
             [0., 0., 1.]])
    else:
        sxy, sxz, syx, syz, szx, szy = flat
        s_mat = np.array(
            [[1, sxy, sxz, 0],
             [syx, 1, syz, 0],
             [szx, szy, 1, 0],
             [0, 0, 0, 1]])
    if center is None:
        return s_mat
    back = translate_affine_mat(center)
    to_origin = translate_affine_mat([-c for c in center])
    return np.dot(back, np.dot(s_mat, to_origin))
def check_transform_shear_op(shear=None, angles=None, center=None,
                             has_input=False, reverse_order=False,
                             batch_size=1, num_threads=4, device_id=0):
    """Run fn.transforms.shear and validate against shear_affine_mat."""
    assert shear is not None or angles is not None
    params = shear if shear is not None else angles
    # 2 parameters describe a 2D shear, 6 parameters a 3D one.
    assert len(params) in (2, 6)
    ndim = 3 if len(params) == 6 else 2
    assert center is None or len(center) == ndim
    pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed=1234)
    with pipe:
        if has_input:
            T0 = fn.random.uniform(range=(-1, 1), shape=(ndim, ndim + 1))
            T1 = fn.transforms.shear(T0, device='cpu', shear=shear, angles=angles, center=center,
                                     reverse_order=reverse_order)
            pipe.set_outputs(T1, T0)
        else:
            pipe.set_outputs(fn.transforms.shear(device='cpu', shear=shear, angles=angles,
                                                 center=center))
    pipe.build()
    outs = pipe.run()
    ref_mat = shear_affine_mat(shear=shear, angles=angles, center=center)
    check_results(outs[0], batch_size, ref_mat, outs[1] if has_input else None, reverse_order,
                  atol=1e-6)
def check_transform_shear_op_runtime_args(ndim, use_angles, use_center,
                                          has_input=False, reverse_order=False,
                                          batch_size=1, num_threads=4, device_id=0):
    """Run fn.transforms.shear with shear/angles/center passed as runtime tensor
    arguments, and validate each sample against shear_affine_mat."""
    pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed=1234)
    with pipe:
        inputs = [fn.random.uniform(range=(-1, 1), shape=(ndim, ndim + 1))] if has_input else []
        params = []
        angles_arg = None
        shear_arg = None
        center_arg = None
        if use_angles:
            angles_arg = fn.random.uniform(range=(-80, 80), shape=[ndim, ndim - 1])
            params.append(angles_arg)
        else:
            shear_arg = fn.random.uniform(range=(-2, 2), shape=[ndim, ndim - 1])
            params.append(shear_arg)
        if use_center:
            center_arg = fn.random.uniform(range=(-10, 10), shape=[ndim])
            params.append(center_arg)
        T1 = fn.transforms.shear(*inputs, device='cpu', shear=shear_arg, angles=angles_arg,
                                 center=center_arg, reverse_order=reverse_order)
        # Also output the generated arguments so references can be built per sample.
        pipe.set_outputs(T1, *inputs, *params)
    pipe.build()
    for _ in range(3):
        outs = pipe.run()
        # Output order: T1 [, input], shear-or-angles [, center].
        T0 = outs[1] if has_input else None
        shear_param = outs[2 if has_input else 1]
        center_param = outs[3 if has_input else 2] if use_center else None
        for idx in range(batch_size):
            angles = None
            shear = None
            center = None
            if use_angles:
                angles = shear_param.at(idx)
            else:
                shear = shear_param.at(idx)
            if use_center:
                center = center_param.at(idx)
            ref_mat = shear_affine_mat(shear=shear, angles=angles, center=center)
            inp = T0.at(idx) if T0 is not None else None
            check_results_sample(outs[0].at(idx), ref_mat, inp, reverse_order, atol=1e-6)
def test_transform_shear_op(batch_size=3, num_threads=4, device_id=0):
    """Generate 2D/3D shear cases from explicit factors or angles."""
    cases = [((1., 2.), None, None),
             ((1., 2.), None, (0.4, 0.5)),
             ((1., 2., 3., 4., 5., 6.), None, None),
             ((1., 2., 3., 4., 5., 6.), None, (0.4, 0.5, 0.6)),
             (None, (30., 10.), None),
             (None, (30., 10.), (0.4, 0.5)),
             (None, (40., 30., 10., 35., 25., 15.), None),
             (None, (40., 30., 10., 35., 25., 15.), (0.4, 0.5, 0.6))]
    for shear, angles, center in cases:
        for has_input in (False, True):
            for reverse_order in ((False, True) if has_input else (False,)):
                yield check_transform_shear_op, shear, angles, center, has_input, \
                    reverse_order, batch_size, num_threads, device_id
def test_transform_shear_op_runtime_args(batch_size=3, num_threads=4, device_id=0):
    """Generate runtime-argument shear cases over all flag combinations."""
    # NOTE(review): the trailing literals pin batch_size=4 and num_threads=4 for the
    # checker (device_id falls back to its default), ignoring this function's own
    # parameters — kept as-is to preserve behavior; confirm whether intentional.
    for ndim in (2, 3):
        for use_angles in (False, True):
            for use_center in (False, True):
                for has_input in (False, True):
                    for reverse_order in ((False, True) if has_input else (False,)):
                        yield check_transform_shear_op_runtime_args, ndim, use_angles, \
                            use_center, has_input, reverse_order, 4, 4
def get_ndim(from_start, from_end, to_start, to_end):
    """Infer dimensionality from the provided crop arguments (1 if all are None)."""
    lengths = [len(arg) for arg in (from_start, from_end, to_start, to_end)
               if arg is not None]
    if not lengths:
        return 1
    ndim = max(lengths)
    # Each argument must match the inferred ndim or be broadcastable (length 1).
    assert all(sz in (1, ndim) for sz in lengths)
    return ndim
def expand_dims(from_start, from_end, to_start, to_end):
    """Broadcast crop arguments to a common ndim, substituting defaults for None.

    Defaults are 0 for the start coordinates and 1 for the end coordinates.
    """
    ndim = get_ndim(from_start, from_end, to_start, to_end)

    def broadcast(arg, fill):
        # None -> all-default; length-1 -> repeated; otherwise must match ndim.
        if arg is None:
            return [fill] * ndim
        if len(arg) == 1:
            return list(arg) * ndim
        assert len(arg) == ndim
        return arg

    return [
        broadcast(from_start, 0.),
        broadcast(from_end, 1.),
        broadcast(to_start, 0.),
        broadcast(to_end, 1.),
    ]
def crop_affine_mat(from_start, from_end, to_start, to_end, absolute=False):
    """Affine matrix mapping the source box onto the destination box."""
    expanded = expand_dims(from_start, from_end, to_start, to_end)
    from_start, from_end, to_start, to_end = (np.array(v) for v in expanded)
    if absolute:
        # Normalize each box so that start <= end, element-wise.
        from_start, from_end = np.minimum(from_start, from_end), np.maximum(from_start, from_end)
        to_start, to_end = np.minimum(to_start, to_end), np.maximum(to_start, to_end)
    scale_factors = (to_end - to_start) / (from_end - from_start)
    # Shift the source start to the origin, scale, then shift to the destination start.
    return np.dot(translate_affine_mat(to_start),
                  np.dot(scale_affine_mat(scale_factors), translate_affine_mat(-from_start)))
def check_transform_crop_op(from_start=None, from_end=None, to_start=None, to_end=None,
                            absolute=False, has_input=False, reverse_order=False,
                            batch_size=1, num_threads=4, device_id=0):
    """Run fn.transforms.crop and validate against crop_affine_mat.

    When there is no input transform, additionally verifies that the produced
    matrix maps the source box corners exactly onto the destination box corners.
    """
    ndim = get_ndim(from_start, from_end, to_start, to_end)
    pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id, seed=1234)
    with pipe:
        if has_input:
            T0 = fn.random.uniform(range=(-1, 1), shape=(ndim, ndim + 1))
            T1 = fn.transforms.crop(T0, device='cpu',
                                    from_start=from_start, from_end=from_end,
                                    to_start=to_start, to_end=to_end,
                                    absolute=absolute,
                                    reverse_order=reverse_order)
            pipe.set_outputs(T1, T0)
        else:
            T1 = fn.transforms.crop(device='cpu',
                                    from_start=from_start, from_end=from_end,
                                    to_start=to_start, to_end=to_end,
                                    absolute=absolute)
            pipe.set_outputs(T1)
    pipe.build()
    outs = pipe.run()
    ref_mat = crop_affine_mat(from_start, from_end, to_start, to_end, absolute=absolute)
    T0 = outs[1] if has_input else None
    T1 = outs[0]
    check_results(T1, batch_size, ref_mat, T0, reverse_order, atol=1e-6)
    if not has_input:
        # Corner-mapping check: expand args to full ndim vectors first.
        from_start, from_end, to_start, to_end = expand_dims(from_start, from_end, to_start, to_end)
        if absolute:
            # `absolute` sorts each start/end pair; mirror that for the reference.
            from_start, from_end = np.minimum(from_start,
                                              from_end), np.maximum(from_start, from_end)
            to_start, to_end = np.minimum(to_start, to_end), np.maximum(to_start, to_end)
        for idx in range(batch_size):
            MT = T1.at(idx)
            # Split the ndim x (ndim+1) matrix into linear part M and translation T.
            M, T = MT[:ndim, :ndim], MT[:, ndim]
            assert np.allclose(np.dot(M, from_start) + T, to_start, atol=1e-6)
            assert np.allclose(np.dot(M, from_end) + T, to_end, atol=1e-6)
def test_transform_crop_op(batch_size=3, num_threads=4, device_id=0):
    """Generate crop-transform cases; each case is also re-run with start/end
    swapped to exercise the `absolute` flag."""
    for from_start, from_end, to_start, to_end in \
        [(None, None, None, None),
         ((0.1, 0.2), (1., 1.2), (0.3, 0.2), (0.5, 0.6)),
         ((0.1, 0.2), (0.4, 0.9), None, None),
         ((0.2, 0.2), None, None, None),
         (None, (0.4, 0.9), None, None),
         ((0.1, 0.2, 0.3), (1., 1.2, 1.3), (0.3, 0.2, 0.1), (0.5, 0.6, 0.7)),
         ((0.1, 0.2, 0.3), (1., 1.2, 1.3), None, None)]:
        for has_input in [False, True]:
            for reverse_order in [False, True] if has_input else [False]:
                yield check_transform_crop_op, from_start, from_end, to_start, to_end, \
                    False, has_input, reverse_order, \
                    batch_size, num_threads, device_id
                # Reversed start and end
                for absolute in [False, True]:
                    yield check_transform_crop_op, from_end, from_start, to_end, to_start, \
                        absolute, has_input, reverse_order, \
                        batch_size, num_threads, device_id
def check_combine_transforms(num_transforms=2, ndim=2, reverse_order=False,
                             batch_size=1, num_threads=4, device_id=0):
    """Check fn.transforms.combine of N random affine inputs against a numpy product.

    With reverse_order=False the operator applies the first input first, which
    corresponds to multiplying the matrices in reverse list order.
    """
    pipe = Pipeline(batch_size=batch_size, num_threads=num_threads, device_id=device_id)
    with pipe:
        transforms = [
            fn.random.uniform(range=(-1, 1), shape=(ndim, ndim + 1), seed=1234)
            for _ in range(num_transforms)
        ]
        # Bug fix: forward reverse_order to the operator. Previously the numpy
        # reference below honored it but the operator always ran with the
        # default, so the reverse_order=True cases compared mismatched setups.
        T = fn.transforms.combine(*transforms, reverse_order=reverse_order)
        pipe.set_outputs(T, *transforms)
    pipe.build()
    outs = pipe.run()
    for idx in range(batch_size):
        num_mats = len(outs) - 1
        assert num_mats >= 2
        # Promote each ndim x (ndim+1) output to a full homogeneous matrix.
        mats = [np.identity(ndim + 1) for _ in range(num_mats)]
        for in_idx in range(len(mats)):
            mats[in_idx][:ndim, :] = outs[1 + in_idx].at(idx)
        # by default we want to access them in opposite order
        if not reverse_order:
            mats.reverse()
        ref_mat = np.identity(ndim + 1)
        for mat in mats:
            ref_mat = np.dot(mat, ref_mat)
        assert np.allclose(outs[0].at(idx), ref_mat[:ndim, :], atol=1e-6)
def test_combine_transforms(batch_size=3, num_threads=4, device_id=0):
    """Generate combine cases over transform counts, dimensions and orderings."""
    for num_transforms in (2, 3, 10):
        for ndim in (2, 3, 6):
            for reverse_order in (False, True):
                yield check_combine_transforms, num_transforms, ndim, reverse_order, \
                    batch_size, num_threads, device_id
def test_combine_transforms_correct_order():
    """combine(t1, t2) must equal applying t1 then t2 (and swapped with reverse_order)."""
    batch_size = 3
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    with pipe:
        import nvidia.dali.fn.transforms as T
        t1 = T.translation(offset=(1, 2))
        t2 = T.rotation(angle=30.0)
        # Chained equivalents: t12 applies translation first, t21 rotation first.
        t12 = T.rotation(T.translation(offset=(1, 2)), angle=30.0)
        t21 = T.translation(T.rotation(angle=30.0), offset=(1, 2))
        pipe.set_outputs(T.combine(t1, t2), t12, T.combine(t1, t2, reverse_order=True), t21)
    pipe.build()
    outs = pipe.run()
    for idx in range(batch_size):
        combined, chained, combined_rev, chained_rev = (o.at(idx) for o in outs)
        assert np.allclose(combined, chained, atol=1e-6)
        assert np.allclose(combined_rev, chained_rev, atol=1e-6)
def verify_deprecation(callback):
    """Assert that *callback* emits exactly the transform_translation deprecation warning."""
    with warnings.catch_warnings(record=True) as caught:
        # Report every warning, even ones already seen in this process.
        warnings.simplefilter("always")
        callback()
    expected_warning = ("WARNING: `transform_translation` is now deprecated."
                        " Use `transforms.translation` instead.")
    assert len(caught) == 1
    emitted = caught[-1]
    assert issubclass(emitted.category, DeprecationWarning)
    assert str(emitted.message) == expected_warning
def test_transform_translation_deprecation():
    """Both the fn and ops aliases of the old name must emit the deprecation warning."""
    for trigger in (lambda: fn.transform_translation(offset=(0, 0)),
                    lambda: ops.TransformTranslation(offset=(0, 0))()):
        verify_deprecation(trigger)
def test_sequences():
    """Exercise fn.transforms.* with per-frame (sequence) arguments.

    Uses sequence_suite_helper to validate per-frame argument handling; the
    generated cases are then replayed with the positional input broadcast
    (frame-less) and, where possible, dropped entirely.
    """
    np_rng = np.random.default_rng(12345)
    rng = random.Random(42)
    num_iters = 4
    max_num_frames = 50
    max_batch_size = 12
    class TransformsParamsProvider(ParamsProvider):
        # Transform outputs are plain matrices: a fully-unfolded "**" layout
        # collapses to an empty layout.
        def unfold_output_layout(self, layout):
            unfolded = super().unfold_output_layout(layout)
            if unfolded == "**":
                return ""
            return unfolded
    # Random-argument callbacks used to build per-frame data for each operator.
    def rand_range(limit):
        return range(rng.randint(1, limit) + 1)
    def mt(desc):
        # A random 2x3 affine matrix.
        return np.float32(np_rng.uniform(-20, 20, (2, 3)))
    def scale(desc):
        return np.array([rng.randint(0, 5), rng.randint(-50, 20)], dtype=np.float32)
    def shift(desc):
        return np.array([rng.randint(-100, 200), rng.randint(-50, 20)], dtype=np.float32)
    def shear_angles(desc):
        return np.array([rng.randint(-90, 90), rng.randint(-90, 90)], dtype=np.float32)
    def angle(desc):
        return np.array(rng.uniform(-180, 180), dtype=np.float32)
    def per_frame_input(frame_cb):
        # num_iters batches of variable batch size, each sample a variable-length
        # sequence of frames produced by frame_cb.
        return [[
            np.array([frame_cb(None) for _ in rand_range(max_num_frames)], dtype=np.float32)
            for _ in rand_range(max_batch_size)]
            for _ in range(num_iters)]
    # Each case: (tested fn, fixed params, params provider, devices).
    test_cases = [
        (fn.transforms.rotation, {}, TransformsParamsProvider(
            [ArgCb("angle", angle, True)]), ["cpu"]),
        (fn.transforms.rotation, {'reverse_order': True}, TransformsParamsProvider(
            [ArgCb("center", shift, True), ArgCb("angle", angle, False)]), ["cpu"]),
        (fn.transforms.scale, {}, TransformsParamsProvider(
            [ArgCb("scale", scale, True), ArgCb("center", shift, False)]), ["cpu"]),
        (fn.transforms.scale, {
            "center": np.array([-50, 100], dtype=np.float32)
        }, TransformsParamsProvider([ArgCb("scale", scale, True)]), ["cpu"]),
        (fn.transforms.translation, {}, TransformsParamsProvider(
            [ArgCb("offset", shift, True)]), ["cpu"]),
        (fn.transforms.shear, {}, TransformsParamsProvider(
            [ArgCb("angles", shear_angles, True)]), ["cpu"]),
        (fn.transforms.shear, {}, TransformsParamsProvider(
            [ArgCb("shear", shift, True)]), ["cpu"]),
        (fn.transforms.combine, {}, TransformsParamsProvider(
            [ArgCb(2, mt, True), ArgCb(1, mt, False)]), ["cpu"]),
        (fn.transforms.combine, {}, TransformsParamsProvider(
            [ArgCb(1, mt, True)]), ["cpu"]),
    ]
    # Cases that only make sense when the positional input itself is a sequence.
    only_with_seq_input_cases = [
        (fn.transforms.combine, {}, TransformsParamsProvider(
            [ArgCb(1, mt, True), ArgCb(2, mt, True)]), ["cpu"]),
        (fn.transforms.combine, {}, TransformsParamsProvider(
            [ArgCb(1, mt, False)]), ["cpu"]),
        (fn.transforms.translation, {}, TransformsParamsProvider(
            [ArgCb("offset", shift, False)]), ["cpu"]),
        (fn.transforms.rotation, {
            'reverse_order': True,
            "angle": 92.
        }, TransformsParamsProvider([]), ["cpu"]),
    ]
    seq_cases = test_cases + only_with_seq_input_cases
    main_input = ArgData(
        desc=ArgDesc(0, "F", "", "F**"),
        data=per_frame_input(mt)
    )
    yield from sequence_suite_helper(rng, [main_input], seq_cases, num_iters)
    # transform the test cases to test the transforms with per-frame args but:
    # 1. with the positional input that does not contain frames
    # 2. without the positional input
    for tested_fn, fixed_params, params_provider, devices in test_cases:
        [main_source, *rest_cbs] = params_provider.input_params
        if main_source.desc.expandable_prefix != "F":
            continue
        broadcast_0_pos_case_params = TransformsParamsProvider([ArgCb(0, mt, False), *rest_cbs])
        broadcast_0_pos_case = (tested_fn, fixed_params, broadcast_0_pos_case_params, devices)
        if any(source.desc.is_positional_arg for source in params_provider.input_params):
            cases = [broadcast_0_pos_case]
        else:
            no_pos_case_params = TransformsParamsProvider(rest_cbs)
            no_pos_input_case = (tested_fn, fixed_params, no_pos_case_params, devices)
            cases = [broadcast_0_pos_case, no_pos_input_case]
        per_frame_data = per_frame_input(main_source.cb)
        data_dim = len(per_frame_data[0][0].shape)
        assert data_dim > 0
        data_layout = "F" + "*" * (data_dim - 1)
        main_input = ArgData(
            desc=ArgDesc(main_source.desc.name, "F", "", data_layout),
            data=per_frame_data)
        yield from sequence_suite_helper(rng, [main_input], cases, num_iters)
def test_combine_shape_mismatch():
    """Combining per-frame inputs with mismatched frame counts must raise."""
    np_rng = np.random.default_rng(42)
    batch_size = 8
    num_frames = 50

    def random_mats():
        return np.float32(np_rng.uniform(-100, 250, (num_frames, 2, 3)))

    batch0_inp = [random_mats() for _ in range(batch_size)]
    # Trim a growing prefix so sample i of the second input has fewer frames.
    batch1_inp = [random_mats()[i:] for i in range(batch_size)]
    expected_msg = "The input 0 and the input 1 have different number of frames for sample 1"
    with assert_raises(RuntimeError, glob=expected_msg):
        @pipeline_def
        def pipeline():
            mts0, mts1 = fn.external_source(lambda _: (batch0_inp, batch1_inp), num_outputs=2)
            return fn.transforms.combine(fn.per_frame(mts0), fn.per_frame(mts1))
        pipe = pipeline(batch_size=batch_size, num_threads=4, device_id=0)
        pipe.build()
        pipe.run()
def test_rotate_shape_mismatch():
    """Verify error messages when per-frame `angle`/`center` lengths disagree
    with the input's frame count or with each other."""
    np_rng = np.random.default_rng(42)
    batch_size = 8
    num_frames = 50

    def mt():
        # One sample: `num_frames` 2x3 affine matrices.
        return np.float32(np_rng.uniform(-100, 250, (num_frames, 2, 3)))

    mts_inp = [mt() for _ in range(batch_size)]
    # Sample i has `num_frames - i` angles, so sample 1 is one frame short.
    angles_inp = [np.array([angle for angle in range(num_frames - i)],
                           dtype=np.float32) for i in range(batch_size)]
    # Sample i has `num_frames + i` centers, so sample 1 has one frame too many.
    centers_inp = [np.array([c for c in range(num_frames + i)], dtype=np.float32)
                   for i in range(batch_size)]
    with assert_raises(RuntimeError, glob="The sample 1 of tensor argument `angle` "
                       "contains 49 per-frame parameters, but there are 50 frames in "
                       "the corresponding sample of input 0."):
        @pipeline_def
        def pipeline():
            mts, angles = fn.external_source(lambda _: (mts_inp, angles_inp), num_outputs=2)
            return fn.transforms.rotation(fn.per_frame(mts), angle=fn.per_frame(angles))
        pipe = pipeline(batch_size=batch_size, num_threads=4, device_id=0)
        pipe.build()
        pipe.run()
    # With no positional input, per-frame tensor arguments are checked against each other.
    with assert_raises(RuntimeError, glob="The sample 1 of tensor argument `center` contains 51 "
                       "per-frame parameters, but there are 49 frames in the corresponding sample "
                       "of argument `angle`"):
        @pipeline_def
        def pipeline():
            angles, centers = fn.external_source(lambda _: (angles_inp, centers_inp), num_outputs=2)
            return fn.transforms.rotation(angle=fn.per_frame(angles), center=fn.per_frame(centers))
        pipe = pipeline(batch_size=batch_size, num_threads=4, device_id=0)
        pipe.build()
        pipe.run()
|
DALI-main
|
dali/test/python/operator_1/test_affine_transforms.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import fn, pipeline_def
from test_utils import RandomlyShapedDataIterator, to_array
import numpy as np
# Batch size shared by all pipelines and checks in this file.
batch_size = 10
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=0)
def copy_pipe(shape, layout, dev, dtype):
    """Pipeline returning a randomly-shaped input and its fn.copy, on `dev`."""
    # Sample shapes are randomized between roughly half of `shape` and `shape`.
    min_shape = [s // 2 if s > 1 else 1 for s in shape]
    min_shape = tuple(min_shape)
    input = fn.external_source(source=RandomlyShapedDataIterator(batch_size,
                                                                 min_shape=min_shape,
                                                                 max_shape=shape,
                                                                 dtype=dtype),
                               layout=layout)
    if dev == "gpu":
        input = input.gpu()
    output = fn.copy(input)
    return input, output
def check_copy(shape, layout, dev, dtype=np.uint8):
    """Run `copy_pipe` and verify the copy matches the input sample-by-sample.

    Fixes: the original reused the name ``i`` for both the iteration counter
    and the in-batch sample index (inner loop shadowed the outer) and shadowed
    the ``input`` builtin; distinct, non-shadowing names are used below.
    """
    pipe = copy_pipe(shape, layout, dev, dtype)
    pipe.build()
    for _ in range(10):
        inp, out = pipe.run()
        for s in range(batch_size):
            # fn.copy must propagate the layout unchanged.
            assert out[s].layout() == inp[s].layout()
            expected = to_array(inp[s])
            obtained = to_array(out[s])
            np.testing.assert_array_equal(expected, obtained)
def test_copy():
    """Yield copy checks over a grid of shapes/layouts, devices and dtypes."""
    cases = [([4, 2, 3], "HWC"), ([6, 1], "FX"), ([8, 10, 10, 3], "FHWC")]
    dtypes = [np.uint8, np.float16, np.int32]
    for shape, layout in cases:
        for device in ("cpu", "gpu"):
            for dtype in dtypes:
                yield check_copy, shape, layout, device, dtype
|
DALI-main
|
dali/test/python/operator_1/test_copy.py
|
# Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
from nvidia.dali import fn, pipeline_def
import nvidia.dali.ops as ops
from test_utils import RandomlyShapedDataIterator, to_array
from nose_utils import assert_raises
import numpy as np
def test_element_extract_operator():
    """End-to-end test of the legacy ops.ElementExtract API on a fixed FHWC batch."""
    batch_size = 4
    F = 10
    W = 32
    H = 32
    C = 3
    # Fixed random batch: each sample is a sequence of F HxWxC frames.
    test_data = []
    for _ in range(batch_size):
        test_data.append(np.array(np.random.rand(F, H, W, C) * 255, dtype=np.uint8))

    class ExternalInputIterator(object):
        # Always feeds the same `test_data` batch on every iteration.
        def __init__(self, batch_size):
            self.batch_size = batch_size

        def __iter__(self):
            self.i = 0
            self.n = self.batch_size
            return self

        def __next__(self):
            batch = test_data
            self.i = (self.i + 1) % self.n
            return (batch)
        next = __next__  # legacy alias used by iter_setup below

    eii = ExternalInputIterator(batch_size)
    iterator = iter(eii)

    class ElementExtractPipeline(Pipeline):
        def __init__(self, batch_size, num_threads, device_id):
            super(ElementExtractPipeline, self).__init__(batch_size, num_threads, device_id)
            self.inputs = ops.ExternalSource()
            # Extract first element in each sample
            self.element_extract_first = ops.ElementExtract(element_map=[0])
            # Extract last element in each sample
            self.element_extract_last = ops.ElementExtract(element_map=[F - 1])
            # Extract both first and last element in each sample to two separate outputs
            self.element_extract_first_last = ops.ElementExtract(element_map=[0, F - 1])

        def define_graph(self):
            self.sequences = self.inputs()
            first_element_1 = self.element_extract_first(self.sequences)
            last_element_1 = self.element_extract_last(self.sequences)
            first_element_2, last_element_2 = self.element_extract_first_last(self.sequences)
            return (first_element_1, last_element_1, first_element_2, last_element_2)

        def iter_setup(self):
            sequences = iterator.next()
            self.feed_input(self.sequences, sequences)

    pipe = ElementExtractPipeline(batch_size, 1, 0)
    pipe.build()
    pipe_out = pipe.run()
    output1, output2, output3, output4 = pipe_out
    assert len(output1) == batch_size
    assert len(output2) == batch_size
    assert len(output3) == batch_size
    assert len(output4) == batch_size
    for i in range(batch_size):
        out1 = output1.at(i)
        out2 = output2.at(i)
        out3 = output3.at(i)
        out4 = output4.at(i)
        # Outputs 1 & 3 must both equal the first frame; 2 & 4 the last frame.
        expected_first = test_data[i][0]
        assert out1.shape == out3.shape
        np.testing.assert_array_equal(expected_first, out1)
        np.testing.assert_array_equal(expected_first, out3)
        expected_last = test_data[i][F - 1]
        assert out2.shape == out4.shape
        np.testing.assert_array_equal(expected_last, out2)
        np.testing.assert_array_equal(expected_last, out4)
# Batch size for the fn-API element_extract tests below.
batch_size = 8
@pipeline_def(batch_size=batch_size, num_threads=4, device_id=0)
def element_extract_pipe(shape, layout, element_map, dev, dtype):
    """Pipeline returning the input and the frames selected by `element_map`."""
    min_shape = [s // 2 if s > 1 else 1 for s in shape]
    # Keep the number of frames (outermost extent) fixed so `element_map` stays valid.
    min_shape[0] = shape[0]
    min_shape = tuple(min_shape)
    input = fn.external_source(source=RandomlyShapedDataIterator(batch_size,
                                                                 min_shape=min_shape,
                                                                 max_shape=shape,
                                                                 dtype=dtype),
                               layout=layout)
    if dev == "gpu":
        input = input.gpu()
    elements = fn.element_extract(input, element_map=element_map)
    # With a single index the operator returns one data node rather than a list.
    result = (input,) + tuple(elements) if len(element_map) > 1 else (input, elements)
    return result
def check_element_extract(shape, layout, element_map, dev, dtype=np.uint8):
    """Run `element_extract_pipe` and verify each extracted frame and its layout.

    Fixes: the iteration counter and the sample index both used the name ``i``
    (the inner loop shadowed the outer), and ``input`` shadowed the builtin.
    """
    pipe = element_extract_pipe(shape, layout, element_map, dev, dtype)
    pipe.build()
    for _ in range(10):
        results = pipe.run()
        inp = results[0]
        elements = results[1:]
        for s in range(batch_size):
            for j, idx in enumerate(element_map):
                # Extracting a frame drops the leading 'F' dimension from the layout.
                assert elements[j][s].layout() == layout[1:]
                expected = to_array(inp[s])[idx]
                obtained = to_array(elements[j][s])
                np.testing.assert_array_equal(expected, obtained)
def test_element_extract_layout():
    """Sweep shapes, element maps, devices and dtypes for fn.element_extract."""
    shape_layouts = [([4, 2, 2], "FHW"), ([6, 1], "FX"), ([8, 10, 10, 3], "FHWC")]
    maps = [[1, 3], [0], [2, 2], [0, 1, 2]]
    for shape, layout in shape_layouts:
        for element_map in maps:
            for device in ("cpu", "gpu"):
                for dtype in (np.uint8, np.int32):
                    yield check_element_extract, shape, layout, element_map, device, dtype
    # Repeated and reversed indices, default dtype.
    for device in ("cpu", "gpu"):
        yield check_element_extract, [4, 3, 3], "FXY", [0, 1, 2, 3, 3, 2, 1, 0], device
def test_raises():
    """Check the error messages produced by fn.element_extract for invalid inputs."""
    # 1D input: there are no per-frame data elements to extract.
    with assert_raises(RuntimeError,
                       glob="Input must have at least two dimensions - outermost for sequence and"
                       " at least one for data elements."):
        check_element_extract([4], "F", [1, 3], "cpu")
    # Layout must start with 'F' (frames outermost).
    for shape, layout in [([6, 1], "XF"), ([8, 10, 3], "HWC")]:
        with assert_raises(RuntimeError,
                           glob="Input layout must describe a sequence - it must start with 'F',"
                           " got '*' instead."):
            check_element_extract(shape, layout, [1, 3], "cpu")
    # Out-of-bounds frame index.
    with assert_raises(RuntimeError,
                       glob="Index `10` from `element_map` is out of bounds for sample with"
                       " sequence length equal `6`"):
        check_element_extract([6, 1], "FX", [10], "cpu")
    # Negative indices are rejected outright.
    with assert_raises(RuntimeError,
                       glob="Negative indices in `element_map` are not allowed, found: -5"):
        check_element_extract([6, 1], "FX", [-5], "cpu")
|
DALI-main
|
dali/test/python/operator_1/test_element_extract.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import numpy as np
from test_utils import RandomDataIterator
class CoordFlipPipeline(Pipeline):
    """Pipeline applying ops.CoordFlip with random per-sample flip flags.

    Returns the input, the flipped output, and the flip flags so the checker
    can compute a reference.
    """

    def __init__(self, device, batch_size, iterator, layout,
                 center_x=None, center_y=None, center_z=None,
                 num_threads=1, device_id=0):
        super(CoordFlipPipeline, self).__init__(batch_size, num_threads, device_id)
        self.device = device
        self.iterator = iterator
        self.coord_flip = ops.CoordFlip(device=self.device, layout=layout,
                                        center_x=center_x, center_y=center_y, center_z=center_z)
        self.flip_x = ops.random.CoinFlip(probability=0.5)
        self.flip_y = ops.random.CoinFlip(probability=0.5)
        # A z flip is only created for 3D layouts (e.g. "xyz").
        self.flip_z = ops.random.CoinFlip(probability=0.5) if len(layout) == 3 else None

    def define_graph(self):
        inputs = fn.external_source(lambda: next(self.iterator))
        inputs = 0.5 + inputs  # Make it fit the range [0.0, 1.0]
        out = inputs.gpu() if self.device == 'gpu' else inputs
        flip_x = self.flip_x()
        flip_y = self.flip_y()
        flip_z = self.flip_z() if self.flip_z is not None else None
        out = self.coord_flip(out, flip_x=flip_x, flip_y=flip_y, flip_z=flip_z)
        outputs = [inputs, out, flip_x, flip_y]
        if flip_z is not None:
            outputs.append(flip_z)
        return outputs
def check_operator_coord_flip(device, batch_size, layout, shape, center_x, center_y, center_z):
    """Compare CoordFlip outputs against a NumPy reference flip around the centers."""
    eii1 = RandomDataIterator(batch_size, shape=shape, dtype=np.float32)
    pipe = CoordFlipPipeline(device, batch_size, iter(eii1),
                             layout, center_x, center_y, center_z)
    pipe.build()
    for i in range(30):
        outputs = pipe.run()
        for sample in range(batch_size):
            in_coords = outputs[0].at(sample)
            if device == 'gpu':
                out_coords = outputs[1].as_cpu().at(sample)
            else:
                out_coords = outputs[1].at(sample)
            # Empty samples pass through as empty - nothing else to check.
            if in_coords.shape == () or in_coords.shape[0] == 0:
                assert out_coords.shape == () or out_coords.shape[0] == 0
                continue
            flip_x = outputs[2].at(sample)
            flip_y = outputs[3].at(sample)
            flip_z = None
            if len(layout) == 3:
                flip_z = outputs[4].at(sample)
            _, ndim = in_coords.shape
            flip_dim = [flip_x, flip_y]
            if ndim == 3:
                flip_dim.append(flip_z)
            center_dim = [center_x, center_y]
            if ndim == 3:
                center_dim.append(center_z)
            # Reference: reflect flipped axes around their center, x -> 2*c - x;
            # unflipped axes are expected unchanged.
            expected_out_coords = np.copy(in_coords)
            for d in range(ndim):
                if flip_dim[d]:
                    expected_out_coords[:, d] = 2 * center_dim[d] - in_coords[:, d]
                np.testing.assert_allclose(out_coords[:, d], expected_out_coords[:, d])
def test_operator_coord_flip():
    """Sweep devices, batch sizes, layouts/shapes and flip centers."""
    centers = [(0.5, 0.5, 0.5), (0.0, 1.0, -0.5)]
    for device in ('cpu', 'gpu'):
        for batch_size in (1, 3):
            cases = [("x", (10, 1)), ("xy", (10, 2)), ("xyz", (10, 3))]
            if device == 'cpu':
                # The empty-sample case is exercised on CPU only.
                cases.append(("xy", (0, 2)))
            for layout, shape in cases:
                for cx, cy, cz in centers:
                    yield (check_operator_coord_flip, device, batch_size, layout,
                           shape, cx, cy, cz)
|
DALI-main
|
dali/test/python/operator_1/test_coord_flip.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import numpy as np
from functools import partial
from test_utils import compare_pipelines
from test_utils import RandomlyShapedDataIterator
import librosa as librosa
class MelFilterBankPipeline(Pipeline):
    """Pipeline applying ops.MelFilterBank to externally-fed spectrograms."""

    def __init__(self, device, batch_size, iterator, nfilter, sample_rate, freq_low, freq_high,
                 normalize, mel_formula, layout='ft', num_threads=1, device_id=0):
        super(MelFilterBankPipeline, self).__init__(batch_size, num_threads, device_id)
        self.device = device
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.fbank = ops.MelFilterBank(device=self.device,
                                       nfilter=nfilter,
                                       sample_rate=sample_rate,
                                       freq_low=freq_low,
                                       freq_high=freq_high,
                                       normalize=normalize,
                                       mel_formula=mel_formula)
        self.layout = layout

    def define_graph(self):
        self.data = self.inputs()
        out = self.data.gpu() if self.device == 'gpu' else self.data
        out = self.fbank(out)
        return out

    def iter_setup(self):
        # Feed the next batch with the requested layout (e.g. 'ft' or 'tf').
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
def mel_fbank_func(nfilter, sample_rate, freq_low, freq_high, normalize, mel_formula, input_data):
    """Reference mel filter bank using librosa, applied along the frequency axis."""
    in_shape = input_data.shape
    # Frequency axis: second-to-last for 2D/3D data, the only axis for 1D.
    freq_axis = -2 if len(in_shape) > 1 else 0
    n_fft_bins = in_shape[freq_axis]
    nfft = 2 * (n_fft_bins - 1)
    mel_transform = librosa.filters.mel(
        sr=sample_rate, n_mels=nfilter, n_fft=nfft,
        fmin=freq_low, fmax=freq_high,
        norm='slaney' if normalize else None,
        dtype=np.float32, htk=(mel_formula == 'htk'))
    out_shape = list(in_shape)
    out_shape[freq_axis] = nfilter
    out = np.zeros(tuple(out_shape), dtype=np.float32)
    if len(in_shape) == 3:
        # Leading (channel-like) dimension: transform each slice separately.
        for c in range(in_shape[0]):
            out[c, :, :] = np.dot(mel_transform, input_data[c, :, :])
    elif len(in_shape) <= 2:
        out = np.dot(mel_transform, input_data)
    return out
class MelFilterBankPythonPipeline(Pipeline):
    """Reference pipeline running `mel_fbank_func` via ops.PythonFunction.

    Time-major data is transposed to frequency-major before the python op
    and transposed back afterwards.
    """

    def __init__(self, device, batch_size, iterator, nfilter, sample_rate, freq_low, freq_high,
                 normalize, mel_formula, layout='ft', num_threads=1, device_id=0,
                 func=mel_fbank_func):
        super(MelFilterBankPythonPipeline, self).__init__(
            batch_size, num_threads, device_id,
            seed=12345, exec_async=False, exec_pipelined=False)
        # PythonFunction runs on CPU regardless of the requested device.
        self.device = "cpu"
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        function = partial(func, nfilter, sample_rate, freq_low, freq_high, normalize, mel_formula)
        self.mel_fbank = ops.PythonFunction(function=function)
        self.layout = layout
        # 'f' anywhere but the last position means frequency-major data.
        self.freq_major = layout.find('f') != len(layout) - 1
        self.need_transpose = not self.freq_major and len(layout) > 1
        if self.need_transpose:
            # Permutation swapping the 'f' axis with the second-to-last axis.
            perm = [i for i in range(len(layout))]
            f = layout.find('f')
            perm[f] = len(layout) - 2
            perm[-2] = f
            self.transpose = ops.Transpose(perm=perm)

    def _transposed(self, op):
        # Wrap `op` so it sees (and produces) frequency-major data.
        return lambda x: self.transpose(op(self.transpose(x)))

    def define_graph(self):
        self.data = self.inputs()
        mel_fbank = self._transposed(self.mel_fbank) if self.need_transpose else self.mel_fbank
        out = mel_fbank(self.data)
        return out

    def iter_setup(self):
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
def check_operator_mel_filter_bank_vs_python(device, batch_size, max_shape,
                                             nfilter, sample_rate, freq_low, freq_high,
                                             normalize, mel_formula, layout):
    """Compare ops.MelFilterBank against the librosa-based python reference."""
    f_axis = layout.find('f')
    # Randomize every extent except the 'f' axis, which must stay at max_shape.
    min_shape = [1 for _ in max_shape]
    min_shape[f_axis] = max_shape[f_axis]
    eii1 = RandomlyShapedDataIterator(
        batch_size, min_shape=min_shape, max_shape=max_shape, dtype=np.float32)
    eii2 = RandomlyShapedDataIterator(
        batch_size, min_shape=min_shape, max_shape=max_shape, dtype=np.float32)
    compare_pipelines(
        MelFilterBankPipeline(
            device, batch_size, iter(eii1),
            nfilter=nfilter, sample_rate=sample_rate, freq_low=freq_low, freq_high=freq_high,
            normalize=normalize, mel_formula=mel_formula, layout=layout),
        MelFilterBankPythonPipeline(
            device, batch_size, iter(eii2),
            nfilter=nfilter, sample_rate=sample_rate, freq_low=freq_low, freq_high=freq_high,
            normalize=normalize, mel_formula=mel_formula, layout=layout),
        batch_size=batch_size, N_iterations=3, eps=1e-03)
def test_operator_mel_filter_bank_vs_python():
    """Grid of devices, batch sizes, normalization, formulas and shapes/layouts."""
    cases = [
        (4, 16000.0, 0.0, 8000.0, (17,), 'f'),
        (4, 16000.0, 0.0, 8000.0, (17, 1), 'ft'),
        (128, 16000.0, 0.0, 8000.0, (513, 100), 'ft'),
        (128, 48000.0, 0.0, 24000.0, (513, 100), 'ft'),
        (128, 16000.0, 0.0, 8000.0, (10, 513, 100), 'Ctf'),
        (128, 48000.0, 4000.0, 24000.0, (513, 100), 'tf'),
        (128, 44100.0, 0.0, 22050.0, (513, 100), 'tf'),
        (128, 44100.0, 1000.0, 22050.0, (513, 100), 'tf'),
    ]
    for device in ('cpu', 'gpu'):
        for batch_size in (1, 3):
            for normalize in (True, False):
                for mel_formula in ('htk', 'slaney'):
                    for nfilter, sample_rate, freq_low, freq_high, shape, layout in cases:
                        yield (check_operator_mel_filter_bank_vs_python, device, batch_size,
                               shape, nfilter, sample_rate, freq_low, freq_high, normalize,
                               mel_formula, layout)
|
DALI-main
|
dali/test/python/operator_1/test_mel_filter_bank.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import pipeline_def
import os
import numpy as np
from nvidia.dali import fn
from test_utils import get_dali_extra_path
from nose_utils import raises
import tempfile
# Root of the DALI_extra test-data checkout.
test_data_root = get_dali_extra_path()
def _uint8_tensor_to_string(t):
return np.array(t).tobytes().decode()
@pipeline_def
def file_properties(files, device):
    """Pipeline returning the "source_info" property of samples read from `files`."""
    read, _ = fn.readers.file(files=files)
    if device == 'gpu':
        read = read.gpu()
    return fn.get_property(read, key="source_info")
def _test_file_properties(device):
    """source_info of fn.readers.file samples must be the originating file path."""
    root_path = os.path.join(test_data_root, 'db', 'single', 'png', '0')
    files = [os.path.join(root_path, fname) for fname in os.listdir(root_path)]
    pipe = file_properties(files, device, batch_size=8, num_threads=4, device_id=0)
    pipe.build()
    for batch in pipe.run():
        if device != 'cpu':
            batch = batch.as_cpu()
        for source_info, expected in zip(batch, files):
            assert _uint8_tensor_to_string(source_info) == expected
def test_file_properties():
    """Run the file-reader source_info check on both backends."""
    yield _test_file_properties, 'cpu'
    yield _test_file_properties, 'gpu'
@pipeline_def
def wds_properties(root_path, device, idx_paths):
    """Pipeline returning "source_info" for jpg samples from a webdataset tar."""
    read = fn.readers.webdataset(paths=[root_path], index_paths=idx_paths, ext=['jpg'])
    if device == 'gpu':
        read = read.gpu()
    return fn.get_property(read, key="source_info")
def generate_wds_index(root_path, index_path):
    """Create a webdataset index file for the archive at `root_path`."""
    # Imported lazily - wds2idx is only needed when an index is generated.
    from wds2idx import IndexCreator
    with IndexCreator(root_path, index_path) as ic:
        ic.create_index()
def _test_wds_properties(device, generate_index):
    """source_info of webdataset samples must be "<tar>:<offset>:<filename>".

    With `generate_index`, an index file is generated on the fly and passed to
    the reader; otherwise the reader scans the archive itself. The previously
    duplicated build/run code is factored into a local helper.
    """
    root_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
    ref_filenames = ["2000.jpg", "2001.jpg", "2002.jpg", "2003.jpg", "2004.jpg", "2005.jpg",
                     "2006.jpg", "2007.jpg"]
    # Known byte offsets of the 8 samples inside the tar.
    ref_indices = [1536, 4096, 6144, 8704, 11264, 13824, 16384, 18432]

    def build_and_run(index_paths):
        # Shared build/run path for both branches.
        p = wds_properties(root_path, device, index_paths,
                           batch_size=8, num_threads=4, device_id=0)
        p.build()
        return p.run()

    if generate_index:
        # The temporary index dir must stay alive while the pipeline runs.
        with tempfile.TemporaryDirectory() as idx_dir:
            index_paths = [os.path.join(idx_dir, os.path.basename(root_path) + ".idx")]
            generate_wds_index(root_path, index_paths[0])
            output = build_and_run(index_paths)
    else:
        output = build_and_run(None)
    for out in output:
        out = out if device == 'cpu' else out.as_cpu()
        for source_info, ref_fname, ref_idx in zip(out, ref_filenames, ref_indices):
            assert _uint8_tensor_to_string(source_info) == f"{root_path}:{ref_idx}:{ref_fname}"
def test_wds_properties():
    """All combinations of backend and explicit/implicit index."""
    for dev in ('cpu', 'gpu'):
        yield _test_wds_properties, dev, True
        yield _test_wds_properties, dev, False
@pipeline_def
def tfr_properties(root_path, index_path, device):
    """Pipeline returning "source_info" for both outputs of a TFRecord reader."""
    import nvidia.dali.tfrecord as tfrec
    features = {
        "image/encoded": tfrec.FixedLenFeature((), tfrec.string, ""),
        "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1)
    }
    inputs = fn.readers.tfrecord(path=root_path, index_path=index_path, features=features)
    enc = fn.get_property(inputs["image/encoded"], key="source_info")
    lab = fn.get_property(inputs["image/class/label"], key="source_info")
    if device == 'gpu':
        enc = enc.gpu()
        lab = lab.gpu()
    return enc, lab
def _test_tfr_properties(device):
    """source_info of TFRecord entries must be "<path> at index <offset>"."""
    root_path = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train')
    index_path = os.path.join(get_dali_extra_path(), 'db', 'tfrecord', 'train.idx')
    # Known byte offsets of the first 8 records in the archive.
    idx = [0, 171504, 553687, 651500, 820966, 1142396, 1380096, 1532947]
    p = tfr_properties(root_path, index_path, device, batch_size=8, num_threads=4, device_id=0)
    p.build()
    output = p.run()
    for out in output:
        out = out if device == 'cpu' else out.as_cpu()
        for source_info, ref_idx in zip(out, idx):
            assert _uint8_tensor_to_string(source_info) == f"{root_path} at index {ref_idx}"
def test_tfr_properties():
    """Run the TFRecord source_info check on both backends."""
    yield _test_tfr_properties, 'cpu'
    yield _test_tfr_properties, 'gpu'
@pipeline_def
def es_properties(layouts, device):
    """Pipeline returning the "layout" property of each external-source output."""
    num_outputs = len(layouts)

    def gen_data():
        # NOTE(review): yields one array for `num_outputs` outputs - presumably
        # external_source splits it across outputs; confirm against its docs.
        yield np.random.rand(num_outputs, 3, 4, 5)

    inp = fn.external_source(source=gen_data, layout=layouts, num_outputs=num_outputs,
                             batch=False, cycle=True, device=device)
    return tuple(fn.get_property(i, key="layout") for i in inp)
def _test_es_properties(device):
    """The "layout" property of each output must equal the layout it was given.

    Fixes a defective assertion: ``assert _uint8_tensor_to_string(sample), lt``
    only checked that the decoded string was non-empty (``lt`` was the assert
    *message*) and never compared it to the expected layout.
    """
    layouts = ['ABC', 'XYZ']
    p = es_properties(layouts, device, batch_size=8, num_threads=4, device_id=0)
    p.build()
    output = p.run()
    for out, lt in zip(output, layouts):
        out = out if device == 'cpu' else out.as_cpu()
        for sample in out:
            assert _uint8_tensor_to_string(sample) == lt
def test_es_properties():
    """Run the external-source layout-property check on both backends."""
    yield _test_es_properties, 'cpu'
    yield _test_es_properties, 'gpu'
@pipeline_def
def improper_property(root_path, device):
    """Pipeline requesting a non-existent property key (expected to fail)."""
    read = fn.readers.webdataset(paths=[root_path], ext=['jpg'])
    return fn.get_property(read, key=["this key doesn't exist"])
@raises(RuntimeError, glob="Unknown property key*")
def _test_improper_property(device):
    """Requesting an unknown property key must raise a RuntimeError."""
    root_path = os.path.join(get_dali_extra_path(), "db/webdataset/MNIST/devel-0.tar")
    p = improper_property(root_path, device, batch_size=8, num_threads=4, device_id=0)
    p.build()
    p.run()
def test_improper_property():
    """Run the unknown-property-key check on both backends."""
    yield _test_improper_property, 'cpu'
    yield _test_improper_property, 'gpu'
|
DALI-main
|
dali/test/python/operator_1/test_get_property.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import random
from nvidia.dali.pipeline import Pipeline
from test_utils import RandomlyShapedDataIterator
from test_utils import compare_pipelines
class LookupTablePipeline(Pipeline):
    """Pipeline applying ops.LookupTable to externally-fed data.

    Fixes a mutable default argument: ``dictionary={}`` shared one dict object
    across all instances; the default is now ``None`` (treated identically,
    since both are falsy in the ``if dictionary:`` test below).
    """

    def __init__(self, device, batch_size, iterator, data_shape, data_layout, dtype, num_threads=1,
                 device_id=0, dictionary=None, default_value=0.0):
        super().__init__(batch_size, num_threads, device_id)
        self.device = device
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.data_shape = data_shape
        self.data_layout = data_layout
        if dictionary:
            # Pass the mapping to the operator as parallel keys/values lists.
            keys = list(dictionary.keys())
            values = [dictionary[k] for k in keys]
            self.lookup = ops.LookupTable(device=self.device, dtype=dtype,
                                          default_value=default_value, keys=keys, values=values)
        else:
            # Empty/None mapping: every input value maps to the default.
            self.lookup = ops.LookupTable(device=self.device, dtype=dtype,
                                          default_value=default_value)

    def define_graph(self):
        self.data = self.inputs()
        input_data = self.data.gpu() if self.device == 'gpu' else self.data
        out = self.lookup(input_data)
        return out

    def iter_setup(self):
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.data_layout)
class LookupTablePythonOpPipeline(Pipeline):
    """Reference pipeline computing the lookup with a python function + Cast.

    Fixes a mutable default argument: ``dictionary={}`` is now ``None`` and
    normalized to a fresh empty dict per instance.
    """

    def __init__(self, function, batch_size, iterator, data_shape, data_layout, dtype,
                 num_threads=1, device_id=0, dictionary=None, default_value=0.0):
        super().__init__(batch_size, num_threads, device_id, exec_async=False, exec_pipelined=False)
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.data_shape = data_shape
        self.data_layout = data_layout
        if dictionary is None:
            dictionary = {}

        def lookup_table_func(input_data):
            # Bind the mapping and default into the reference function.
            return function(input_data, dictionary=dictionary, default_value=default_value)

        self.lookup = ops.PythonFunction(function=lookup_table_func, output_layouts=data_layout,
                                         batch_processing=False)
        self.cast = ops.Cast(dtype=dtype)

    def define_graph(self):
        self.data = self.inputs()
        out = self.lookup(self.data)
        out = self.cast(out)
        return out

    def iter_setup(self):
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.data_layout)
def lookup_func(image, dictionary, default_value):
    """NumPy reference lookup: map values through `dictionary`, else `default_value`."""
    # Table covering the whole possible key range (keys are below 0x1000).
    table = np.full(0x1000, default_value)
    for key, value in dictionary.items():
        table[key] = value
    return table[image]
def check_lookup_table_vs_python_op(device, batch_size, layout, shape, dtype, dictionary_type,
                                    default_value):
    """Compare ops.LookupTable with the python reference for a given dictionary.

    Fixes the ``assert False`` fallback (silently stripped under ``python -O``)
    by raising an explicit ``ValueError`` for unknown dictionary types.
    """
    eii1 = RandomlyShapedDataIterator(batch_size, max_shape=shape)
    eii2 = RandomlyShapedDataIterator(batch_size, max_shape=shape)
    if dictionary_type == 'empty':
        dictionary = {}
    elif dictionary_type == 'random':
        dictionary = {k: random.random() for k in range(256)}
    elif dictionary_type == 'small':
        dictionary = {0: 0.1, 200: 0.99}
    else:
        raise ValueError(f"unknown dictionary_type: {dictionary_type!r}")
    compare_pipelines(
        LookupTablePipeline(device, batch_size, iter(eii1), data_shape=shape, data_layout=layout,
                            dtype=dtype, dictionary=dictionary, default_value=default_value),
        LookupTablePythonOpPipeline(lookup_func, batch_size, iter(eii2), data_shape=shape,
                                    data_layout=layout, dtype=dtype, dictionary=dictionary,
                                    default_value=default_value),
        batch_size=batch_size, N_iterations=3)
def test_lookup_table_vs_python_op():
    """Sweep devices, output dtypes and dictionary variants."""
    layout = types.NHWC
    cases = [
        (1, (300, 300, 3), 'random', 0.0),
        (1, (300, 300, 3), 'empty', 0.33),
        (10, (300, 300, 3), 'random', 0.9),
        (3, (300, 300, 3), 'small', 0.4),
    ]
    for device in {'cpu', 'gpu'}:
        for dtype in {types.FLOAT, types.FLOAT16, types.INT64}:
            for batch_size, shape, dictionary_type, default_value in cases:
                yield (check_lookup_table_vs_python_op, device, batch_size, layout, shape,
                       dtype, dictionary_type, default_value)
|
DALI-main
|
dali/test/python/operator_1/test_lookup_table.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import random
from nvidia.dali import pipeline_def
from sequences_test_utils import ArgCb, video_suite_helper
from test_utils import RandomDataIterator
def dali_type_to_np(dtype):
    """Map a DALI scalar type to the corresponding NumPy scalar type.

    Fixes the ``assert False`` fallback (stripped under ``python -O``) with an
    explicit ``ValueError`` for unsupported types.
    """
    mapping = {
        types.FLOAT: np.single,
        types.INT16: np.short,
        types.INT32: np.intc,
        types.UINT8: np.ubyte,
    }
    try:
        return mapping[dtype]
    except KeyError:
        raise ValueError(f"unsupported DALI type: {dtype}") from None
@pipeline_def()
def ColorTwistPipeline(data_iterator, is_input_float, inp_dtype, out_dtype):
    """Pipeline running fn.color_twist on CPU and GPU with random parameters,
    returning the input, both outputs, and the parameters used."""
    imgs = fn.external_source(source=data_iterator)
    o_dtype = dali_type_to_np(out_dtype)
    # converting float inputs to integer outs leads to binary images as
    # input is in -1 to 1 range in such case
    if is_input_float and not np.issubdtype(o_dtype, np.floating):
        imgs *= 255
    H = fn.random.uniform(range=[-20, 20])
    S = fn.random.uniform(range=[0, 2])
    brightness = fn.random.uniform(range=[0, 2])
    contrast = fn.random.uniform(range=[0, 2])
    # Only request a dtype conversion when it differs from the input type.
    out_dtype_arg = out_dtype if out_dtype != inp_dtype else None
    out_cpu, out_gpu = (
        fn.color_twist(input, hue=H, saturation=S, brightness=brightness, contrast=contrast,
                       dtype=out_dtype_arg) for input in (imgs, imgs.gpu()))
    return imgs, out_cpu, out_gpu, H, S, brightness, contrast
# RGB <-> YIQ color-space conversion matrices used by the reference below.
rgb2yiq = np.array([[.299, .587, .114],
                    [.596, -.274, -.321],
                    [.211, -.523, .311]])
yiq2rgb = np.linalg.inv(rgb2yiq)
def convert_sat(data, out_dtype):
    """Saturate-cast `data` to `out_dtype` (clip to range + round for integer targets)."""
    if np.issubdtype(out_dtype, np.floating):
        return data.astype(out_dtype)
    info = np.iinfo(out_dtype)
    return np.round(np.clip(data, info.min, info.max)).astype(out_dtype)
def ref_color_twist(img, H, S, brightness, contrast, out_dtype):
    """NumPy reference for color_twist: hue/saturation in YIQ space, then
    contrast and brightness, saturate-cast to `out_dtype`."""
    in_dtype = img.dtype
    # Hue rotation by H degrees and saturation scaling act on the I/Q plane.
    rad = math.radians(H)
    sin_h, cos_h = math.sin(rad), math.cos(rad)
    hue_sat = np.array([[1, 0, 0],
                        [0, cos_h * S, sin_h * S],
                        [0, -sin_h * S, cos_h * S]])
    # Compose the whole transform as a single RGB-space matrix.
    rgb_mat = np.matmul(yiq2rgb, np.matmul(hue_sat, rgb2yiq))
    flat = img.reshape([-1, img.shape[-1]])
    flat = np.matmul(flat, rgb_mat.transpose())
    # Contrast pivots around mid-grey, which depends on the input value range.
    grey = 0.5 if np.issubdtype(in_dtype, np.floating) else 128
    flat = ((flat - grey) * contrast + grey) * brightness
    return convert_sat(flat.reshape(img.shape), out_dtype)
def check(input, out_cpu, out_gpu, H, S, brightness, contrast, out_dtype):
    """Compare CPU and GPU color_twist outputs against the NumPy reference."""
    ref = ref_color_twist(input, H, S, brightness, contrast, out_dtype)
    if np.issubdtype(out_dtype, np.floating):
        rel_err, abs_err = 1e-3, 1e-3
    else:
        # due to rounding error for integer out type can be off by 1
        rel_err, abs_err = 1 / 512, 1
    assert np.allclose(out_cpu, ref, rel_err, abs_err)
    assert np.allclose(out_gpu, ref, rel_err, abs_err)
def check_ref(inp_dtype, out_dtype, has_3_dims):
    """Run ColorTwistPipeline and validate every sample against the reference."""
    batch_size = 32
    n_iters = 8
    # Optionally prepend an extra leading dim to exercise >3D inputs.
    shape = (128, 32, 3) if not has_3_dims else (random.randint(2, 5), 128, 32, 3)
    inp_dtype = dali_type_to_np(inp_dtype)
    ri1 = RandomDataIterator(batch_size, shape=shape, dtype=inp_dtype)
    pipe = ColorTwistPipeline(seed=2139,
                              batch_size=batch_size,
                              num_threads=4,
                              device_id=0,
                              data_iterator=ri1,
                              is_input_float=np.issubdtype(inp_dtype, np.floating),
                              inp_dtype=inp_dtype,
                              out_dtype=out_dtype)
    pipe.build()
    for _ in range(n_iters):
        inp, out_cpu, out_gpu, H, S, B, C = pipe.run()
        out_gpu = out_gpu.as_cpu()
        for i in range(batch_size):
            # Per-sample random parameters drawn by the pipeline.
            h, s, b, c = H.at(i), S.at(i), B.at(i), C.at(i)
            check(inp.at(i), out_cpu.at(i), out_gpu.at(i), h, s, b, c, dali_type_to_np(out_dtype))
def test_color_twist():
    """Cross input/output dtypes; dimensionality is chosen at random per case."""
    dtypes = [types.FLOAT, types.INT16, types.UINT8]
    for inp_dtype in dtypes:
        for out_dtype in dtypes:
            yield check_ref, inp_dtype, out_dtype, random.choice([False, True])
def test_video():
    """Video (sequence) test-suite cases for hue/saturation/hsv/color_twist."""
    def rng_param(scale):
        # Per-sample random scalar argument in [0, scale).
        def cb(sample_desc):
            return np.float32(scale * sample_desc.rng.random())
        return cb

    hue = rng_param(360)
    saturation = rng_param(1)
    value = rng_param(1)
    contrast = rng_param(2)
    brightness = rng_param(2)
    video_test_cases = [
        (fn.hue, {}, [ArgCb("hue", hue, True)]),
        (fn.saturation, {}, [ArgCb("saturation", saturation, True)]),
        (fn.hsv, {}, [
            ArgCb("hue", hue, True),
            ArgCb("saturation", saturation, True),
            ArgCb("value", value, True)
        ]),
        (fn.hsv, {}, [
            ArgCb("hue", hue, False),
            ArgCb("saturation", saturation, True),
            ArgCb("value", value, False)
        ]),
        (fn.color_twist, {}, [
            ArgCb("brightness", brightness, True),
            ArgCb("hue", hue, True),
            ArgCb("saturation", saturation, True),
            ArgCb("contrast", contrast, True),
        ]),
        (fn.color_twist, {}, [ArgCb("brightness", brightness, True),
                              ArgCb("hue", hue, False)]),
    ]
    yield from video_suite_helper(video_test_cases, test_channel_first=False)
def test_color_twist_default_dtype():
    """Check that color ops keep the input dtype when no output dtype is requested.

    Fix: the original shadowed the builtin ``type`` with a loop/parameter name;
    renamed to ``dtype`` (the nested ``impl`` is only ever called positionally,
    so the rename is backward-compatible).
    """
    np_types = [types.FLOAT, types.INT32, types.INT16, types.UINT8]  # Just some types

    def impl(op, device, dtype):
        @pipeline_def(batch_size=1, num_threads=3, device_id=0)
        def pipeline():
            data = fn.constant(idata=255, shape=(10, 10, 3), dtype=dtype, device=device)
            return op(data)

        pipe = pipeline()
        pipe.build()
        data, = pipe.run()
        assert data[0].dtype == dtype, f"{data[0].dtype} != {dtype}"

    for device in ['gpu', 'cpu']:
        for dtype in np_types:
            for op in [fn.hue]:
                yield impl, op, device, dtype
|
DALI-main
|
dali/test/python/operator_1/test_color_twist.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nvidia.dali import pipeline_def, fn, types
from test_utils import np_type_to_dali, has_operator, restrict_platform
from nose_utils import assert_raises
from nose2.tools import params
def sample_to_lz4(sample):
    """LZ4-compress an array; return the deflated bytes as a uint8 numpy array."""
    import lz4.block
    # store_size=False: raw stream, the inflate operator receives the size separately.
    compressed = lz4.block.compress(sample, store_size=False)
    return np.frombuffer(compressed, dtype=np.uint8)
def check_batch(inflated, baseline, batch_size, layout=None, oversized_shape=False):
    """Verify an inflated batch matches the baseline (optionally zero-padded)."""
    layout = layout or ""
    assert inflated.layout() == layout, (f"The batch layout '({inflated.layout()})' does "
                                         f"not match the expected layout ({layout})")
    got_samples = [np.array(sample) for sample in inflated.as_cpu()]
    ref_samples = [np.array(sample) for sample in baseline]
    assert batch_size == len(inflated) == len(baseline)
    for got, ref in zip(got_samples, ref_samples):
        if not oversized_shape:
            np.testing.assert_array_equal(got, ref)
            continue
        # Oversized output: each frame starts with the baseline data,
        # the remainder must be zero padding.
        assert len(got) == len(ref)
        for got_frame, ref_frame in zip(got, ref):
            flat = got_frame.reshape(-1)
            n = ref_frame.size
            np.testing.assert_array_equal(flat[:n].reshape(ref_frame.shape), ref_frame)
            output_tail = flat[n:]
            assert np.all(
                output_tail == 0), (f"Oversized output was not properly padded with 0s. "
                                    f"Tail size {len(output_tail)}, the tail {output_tail}")
def _test_sample_inflate(batch_size, np_dtype, seed):
    """Round-trip check: LZ4-deflated samples inflated on the GPU match the originals.

    The epoch is split into randomly-sized iterations; sample sizes are a random
    permutation of [0, epoch_size), so empty samples are covered too.
    NOTE: `rng` is shared between the closures below — draw order matters.
    """
    epoch_size = 10 * batch_size
    rng = np.random.default_rng(seed=seed)
    permutation = rng.permutation(epoch_size)
    dtype = np_type_to_dali(np_dtype)
    def gen_iteration_sizes():
        # Random iteration sizes in [1, batch_size], truncated to fit the epoch.
        num_yielded_samples = 0
        while num_yielded_samples < epoch_size:
            iteration_size = np.int32(np.floor(rng.uniform(1, batch_size + 1)))
            iteration_size = min(iteration_size, epoch_size - num_yielded_samples)
            yield iteration_size
            num_yielded_samples += iteration_size
    iteration_sizes = list(gen_iteration_sizes())
    assert sum(iteration_sizes) == epoch_size
    def source():
        # Yields (raw samples, deflated samples, shapes) per iteration.
        num_yielded_samples = 0
        for iteration_size in iteration_sizes:
            sample_sizes = [permutation[num_yielded_samples + i] for i in range(iteration_size)]
            num_yielded_samples += iteration_size
            def sample(sample_size):
                # Deterministic arange content so samples of equal size still differ by offset.
                start = (sample_size - 1) * sample_size // 2
                sample = np.arange(start, start + sample_size, dtype=np_dtype)
                return sample, sample_to_lz4(sample)
            samples, deflated = list(zip(*[sample(sample_size) for sample_size in sample_sizes]))
            yield list(samples), list(deflated), np.array(sample_sizes, dtype=np.int32)
    @pipeline_def
    def pipeline():
        sample, deflated, shape = fn.external_source(source=source, batch=True, num_outputs=3)
        inflated = fn.experimental.inflate(deflated.gpu(), shape=shape, dtype=dtype)
        return inflated, sample
    pipe = pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    pipe.build()
    for iter_size in iteration_sizes:
        inflated, baseline = pipe.run()
        check_batch(inflated, baseline, iter_size)
@has_operator("experimental.inflate")
@restrict_platform(min_compute_cap=6.0, platforms=["x86_64"])
def test_sample_inflate():
    """Yield sample-inflation cases over batch sizes and dtypes, each with a distinct seed."""
    dtypes = [np.uint8, np.int8, np.uint16, np.int32, np.float32, np.float16]
    seed = 42
    for batch_size in [1, 64, 348]:
        for dtype in dtypes:
            yield _test_sample_inflate, batch_size, dtype, seed
            seed += 1
def _test_scalar_shape(dtype, shape, layout):
    """Check inflate with a constant scalar/1D ``shape`` argument and optional layout."""
    def make_sample(sample_info):
        # Deterministic content scaled by sample index, so samples differ.
        count = np.prod(shape)
        scale = sample_info.idx_in_epoch + 1
        return np.arange(0, count, dtype=dtype).reshape(shape) * scale

    def make_deflated(sample_info):
        return np.array(sample_to_lz4(make_sample(sample_info)))

    @pipeline_def
    def pipeline():
        baseline = fn.external_source(source=make_sample, batch=False)
        deflated = fn.external_source(source=make_deflated, batch=False, device="gpu")
        inflated = fn.experimental.inflate(deflated, shape=shape, dtype=np_type_to_dali(dtype),
                                           layout=layout)
        return inflated, baseline

    batch_size = 16
    pipe = pipeline(batch_size=batch_size, num_threads=8, device_id=0)
    pipe.build()
    for _ in range(4):
        inflated, baseline = pipe.run()
        check_batch(inflated, baseline, batch_size, layout)
@has_operator("experimental.inflate")
@restrict_platform(min_compute_cap=6.0, platforms=["x86_64"])
def test_scalar_shape():
    """Yield shape/layout/dtype cases, including primes around the 2**16 boundary."""
    largest_prime_smaller_than_2_to_16 = 65521
    prime_larger_than_2_to_16 = 262147
    cases = [
        (largest_prime_smaller_than_2_to_16, "X"),
        (largest_prime_smaller_than_2_to_16, None),
        (prime_larger_than_2_to_16, "Y"),
        ([3, 5, 7], "ABC"),
        ([3, 5, 7], ""),
        ([13, 15, 7], None),
        (np.array([31, 101, 17], dtype=np.int32), "DEF"),
        ([4, 8, 16, 2], "FGNH"),
        ([100, 10], "WW"),
        (np.array([], dtype=np.int32), None),
    ]
    for shape, layout in cases:
        for dtype in [np.uint8, np.float32, np.uint16]:
            yield _test_scalar_shape, dtype, shape, layout
def seq_source(rng, ndim, dtype, mode, permute, oversized_shape):
    """Build a per-sample source of multi-chunk (sequence-like) deflated data.

    Returns a callable producing (raw sample, concatenated deflated chunks,
    reported shape, and — depending on ``mode`` — chunk offsets and/or sizes).
    NOTE: the closures below draw from the shared ``rng``; call order matters.
    """
    def uniform(shape):
        return dtype(rng.uniform(-2**31, 2**31 - 1, shape))
    def std(shape):
        return dtype(128 * rng.standard_normal(shape) + 3)
    def smaller_std(shape):
        return dtype(16 * rng.standard_normal(shape))
    def inflate_shape(shape):
        # Over-report the shape by a factor in [1, 2) per extent.
        multiplier = rng.uniform(1, 2, ndim)
        return np.int32(shape * multiplier)
    def inner():
        # Keep 3D+ frames smaller so samples stay cheap.
        max_extent_size = 64 if ndim >= 3 else 128
        distrs = [uniform, std, smaller_std]
        distrs = rng.permutation(distrs)
        num_chunks = np.int32(rng.uniform(1, 32))
        shape = np.int32(rng.uniform(0, max_extent_size, ndim))
        sample = np.array([(distrs[i % len(distrs)])(shape) for i in range(num_chunks)],
                          dtype=dtype)
        chunks = [sample_to_lz4(chunk) for chunk in sample]
        sizes = [len(chunk) for chunk in chunks]
        # Offsets are the exclusive prefix sum of the chunk sizes.
        offsets = np.int32(np.cumsum([0] + sizes[:-1]))
        sizes = np.array(sizes, dtype=np.int32)
        deflated = np.concatenate(chunks)
        reported_shape = shape if not oversized_shape else inflate_shape(shape)
        if permute:
            # Shuffle and subset the chunks; only valid with explicit offsets+sizes.
            assert mode == "offset_and_size"
            perm = rng.permutation(num_chunks)
            subset = rng.choice([True, False], num_chunks)
            sample = sample[perm][subset]
            offsets = offsets[perm][subset]
            sizes = sizes[perm][subset]
        if mode == "offset_only":
            return sample, deflated, reported_shape, offsets
        elif mode == "size_only":
            return sample, deflated, reported_shape, sizes
        else:
            assert mode == "offset_and_size"
            return sample, deflated, reported_shape, offsets, sizes
    return inner
def _test_chunks(seed, batch_size, ndim, dtype, layout, mode, permute, oversized_shape,
                 sequence_axis_name):
    """Inflate multi-chunk samples and compare against the raw baseline."""
    rng = np.random.default_rng(seed=seed)
    source = seq_source(rng, ndim, dtype, mode, permute, oversized_shape)

    @pipeline_def
    def pipeline():
        num_outputs = 5 if mode == "offset_and_size" else 4
        baseline, deflated, reported_shape, *rest = fn.external_source(
            source=source, batch=False, num_outputs=num_outputs)
        offsets, sizes = None, None
        if mode == "offset_only":
            (offsets,) = rest
        elif mode == "size_only":
            (sizes,) = rest
        else:
            offsets, sizes = rest
        inflated = fn.experimental.inflate(deflated.gpu(), shape=reported_shape,
                                           dtype=np_type_to_dali(dtype), chunk_offsets=offsets,
                                           chunk_sizes=sizes, layout=layout,
                                           sequence_axis_name=sequence_axis_name)
        return inflated, baseline

    pipe = pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    pipe.build()
    if layout:
        # The operator prepends the sequence axis ("F" by default) to the layout.
        layout = (sequence_axis_name or "F") + layout
    for _ in range(4):
        inflated, baseline = pipe.run()
        check_batch(inflated, baseline, batch_size, layout, oversized_shape=oversized_shape)
@has_operator("experimental.inflate")
@restrict_platform(min_compute_cap=6.0, platforms=["x86_64"])
def test_chunks():
    """Yield chunked-inflate cases over dtypes, dims, layouts and chunk-description modes."""
    seed = 42
    batch_sizes = [1, 9, 31]
    dim_cases = [(0, None, None), (1, None, "F"), (2, "XY", "Q"),
                 (2, None, None), (3, "ABC", None), (3, "", "W")]
    mode_cases = [("offset_only", False), ("size_only", False),
                  ("offset_and_size", False), ("offset_and_size", True)]
    for dtype in [np.uint8, np.int16, np.float32]:
        for ndim, layout, sequence_axis_name in dim_cases:
            for mode, permute in mode_cases:
                # Derive batch size and the oversized-shape flag from the running seed.
                batch_size = batch_sizes[seed % len(batch_sizes)]
                oversized = ndim > 0 and seed % 2 == 1
                yield (_test_chunks, seed, batch_size, ndim, dtype, layout, mode,
                       permute, oversized, sequence_axis_name)
                seed += 1
@has_operator("experimental.inflate")
@restrict_platform(min_compute_cap=6.0, platforms=["x86_64"])
@params({"chunk_offsets": []}, {"chunk_sizes": []}, {"chunk_offsets": np.array([], dtype=np.int32)},
        {"chunk_sizes": np.array([], dtype=np.int32)})
def test_total_no_chunks(ex_kwargs):
    """An empty chunk list must produce samples with an empty (0-frame) sequence."""
    frame = np.full((128, 128, 3), 42, dtype=np.uint8)
    deflated = np.concatenate([sample_to_lz4(frame)] * 7)
    baseline = np.array([], dtype=np.uint8).reshape((0, 128, 128, 3))

    @pipeline_def
    def pipeline():
        data = fn.external_source(source=lambda _: deflated, batch=False)
        return fn.experimental.inflate(data.gpu(), shape=(128, 128, 3), layout="HWC",
                                       **ex_kwargs)

    batch_size = 8
    pipe = pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    pipe.build()
    for _ in range(2):
        (inflated, ) = pipe.run()
        check_batch(inflated, [baseline] * batch_size, batch_size, layout="FHWC")
def _test_validation(pipeline, error_glob, kwargs=None):
    """Build and run ``pipeline``, expecting a RuntimeError matching ``error_glob``."""
    kwargs = kwargs or {}
    with assert_raises(RuntimeError, glob=error_glob):
        pipe = pipeline(batch_size=4, num_threads=4, device_id=0, **kwargs)
        pipe.build()
        pipe.run()
@has_operator("experimental.inflate")
@restrict_platform(min_compute_cap=6.0, platforms=["x86_64"])
def test_validation():
    """Negative tests: each pipeline misuses the inflate operator in one way and
    must raise a RuntimeError whose message matches the glob yielded below.
    The globs are matched against operator error messages — keep them verbatim.
    """
    # Shape argument must be scalar or 1D, not a matrix.
    @pipeline_def
    def pipeline_2d_shape():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4], dtype=np.uint8), batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=np.array([[1, 5], [4, 5]],
                                                                     dtype=np.int32))
        return inflated
    @pipeline_def
    def pipeline_non_elementary_dtype():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4], dtype=np.uint8), batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=4,
                                           dtype=types.DALIDataType.TENSOR_LAYOUT)
        return inflated
    # Input must be a 1D uint8 tensor — wrong dtype and 0-dim inputs are rejected.
    @pipeline_def
    def pipeline_input_float():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4], dtype=np.float32),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42)
        return inflated
    @pipeline_def
    def pipeline_input_scalar():
        inp = fn.external_source(source=lambda: np.array(1, dtype=np.uint8), batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42)
        return inflated
    @pipeline_def
    def pipeline_input_algorithm():
        inp = fn.external_source(source=lambda: np.array([1], dtype=np.uint8), batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42, algorithm="")
        return inflated
    # Chunk sizes/offsets must describe regions inside the input sample.
    @pipeline_def
    def pipeline_too_big_chunk():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4, 5], dtype=np.uint8),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42, chunk_sizes=[6])
        return inflated
    @pipeline_def
    def pipeline_too_big_chunks():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4, 5], dtype=np.uint8),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42, chunk_sizes=[3, 3])
        return inflated
    @pipeline_def
    def pipeline_empty_chunk():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4, 5], dtype=np.uint8),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42, chunk_sizes=[0])
        return inflated
    @pipeline_def
    def pipeline_neg_chunk():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4, 5], dtype=np.uint8),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42, chunk_sizes=[3, -1])
        return inflated
    @pipeline_def
    def pipeline_too_big_offsets():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4, 5], dtype=np.uint8),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42, chunk_offsets=[0, 5])
        return inflated
    @pipeline_def
    def pipeline_too_zero_size_inferred():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4, 5], dtype=np.uint8),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42, chunk_offsets=[1, 1])
        return inflated
    @pipeline_def
    def pipeline_sizes_offsets_mismatched():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4, 5], dtype=np.uint8),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42, chunk_offsets=[1, 1],
                                           chunk_sizes=[1, 1, 1])
        return inflated
    @pipeline_def
    def pipeline_negative_offset():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4, 5], dtype=np.uint8),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42, chunk_offsets=[-5, 0],
                                           chunk_sizes=[5, 5])
        return inflated
    @pipeline_def
    def pipeline_chunk_exceeding_sample():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4, 5], dtype=np.uint8),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=42, chunk_offsets=[2], chunk_sizes=[4])
        return inflated
    # sequence_axis_name must be exactly one character.
    @pipeline_def
    def pipeline_sequence_axis_no_name():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4, 5], dtype=np.uint8),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=5, sequence_axis_name="")
        return inflated
    @pipeline_def
    def pipeline_sequence_axis_too_long_name():
        inp = fn.external_source(source=lambda: np.array([1, 2, 3, 4, 5], dtype=np.uint8),
                                 batch=False)
        inflated = fn.experimental.inflate(inp.gpu(), shape=5, sequence_axis_name="AB")
        return inflated
    yield _test_validation, pipeline_2d_shape, "The shape argument must be a scalar or a 1D tensor"
    yield _test_validation, pipeline_non_elementary_dtype, \
        "The inflate output type must have floating point or integral type"
    yield _test_validation, pipeline_input_float, "Got tensor of type `float` instead"
    yield _test_validation, pipeline_input_scalar, "Got input with 0 dimensions instead"
    yield _test_validation, pipeline_input_algorithm, "Unknown inflate algorithm"
    yield _test_validation, pipeline_too_big_chunk, "Input chunk size cannot exceed the sample size"
    yield _test_validation, pipeline_too_big_chunks, \
        "The sum of chunk sizes for sample of idx 0 exceeds the total size of the sample."
    yield _test_validation, pipeline_empty_chunk, "Got chunk size 0 for sample of idx 0"
    yield _test_validation, pipeline_neg_chunk, "Got chunk size -1 for sample of idx 0"
    yield _test_validation, pipeline_too_big_offsets, \
        "Got chunk offset 5 while the sample size is 5 for sample of idx 0"
    yield _test_validation, pipeline_too_zero_size_inferred, \
        "The inferred size of a chunk would be non-positive for sample of idx 0"
    yield _test_validation, pipeline_sizes_offsets_mismatched, \
        "for sample of idx 0 there are 2 offsets and 3 sizes"
    yield _test_validation, pipeline_negative_offset, \
        "Input chunks offsets must be non-negative"
    yield _test_validation, pipeline_chunk_exceeding_sample, \
        "Input chunk cannot exceed the sample size"
    yield _test_validation, pipeline_sequence_axis_no_name, \
        "The `sequence_axis_name` must be a single character, got \"\""
    yield _test_validation, pipeline_sequence_axis_too_long_name, \
        "The `sequence_axis_name` must be a single character, got \"AB\""
|
DALI-main
|
dali/test/python/operator_1/test_inflate.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali.fn as fn
from nvidia.dali import pipeline_def
import nvidia.dali.types as types
import numpy as np
from test_utils import compare_pipelines, python_function
from test_utils import RandomDataIterator
from sequences_test_utils import ArgCb, video_suite_helper
import random
def max_range(dtype):
    """Return the maximal value of the dtype (1.0 for floating-point types)."""
    if dtype in (np.half, np.single, np.double):
        return 1.0
    return np.iinfo(dtype).max
def type_range(dtype):
    """Return the (min, max) representable values of the dtype."""
    info = np.finfo(dtype) if dtype in [np.half, np.single, np.double] else np.iinfo(dtype)
    return (info.min, info.max)
def convert_sat(data, out_dtype):
    """Saturate-cast ``data`` to ``out_dtype``, rounding for integer outputs."""
    is_float = out_dtype in [np.half, np.single, np.double]
    # Inlined type range lookup: finfo for floats, iinfo for integers.
    info = np.finfo(out_dtype) if is_float else np.iinfo(out_dtype)
    clipped = np.clip(data, info.min, info.max)
    if not is_float:
        clipped = np.round(clipped)
    return clipped.astype(out_dtype)
def dali_type_to_np(dtype):
    """Map a DALI data type to the corresponding numpy scalar type."""
    mapping = {
        types.FLOAT: np.single,
        types.INT16: np.short,
        types.INT32: np.intc,
        types.UINT8: np.ubyte,
    }
    # Unsupported types are a test bug, same as the original `assert False`.
    assert dtype in mapping
    return mapping[dtype]
def bricon_ref(input, brightness, brightness_shift, contrast, contrast_center, out_dtype):
    """Reference brightness/contrast: shift*range + brightness*(center + contrast*(x-center))."""
    out_range = max_range(out_dtype)
    contrasted = contrast_center + contrast * (input - contrast_center)
    return convert_sat(brightness_shift * out_range + brightness * contrasted, out_dtype)
def contrast_param():
    """Uniform random contrast factor in [-1, 1] (fixed seed for reproducibility)."""
    contrast = fn.random.uniform(range=[-1.0, 1.0], seed=123)
    return contrast
def contrast_center_param():
    """Uniform random contrast center in [0, 1] (fixed seed for reproducibility)."""
    center = fn.random.uniform(range=[0., 1.0], seed=123)
    return center
def brightness_params():
    """Return (brightness in [0, 5], brightness_shift in [-1, 1]) random DALI args."""
    brightness = fn.random.uniform(range=[0.0, 5.0], seed=123)
    shift = fn.random.uniform(range=[-1.0, 1.0], seed=123)
    return brightness, shift
@pipeline_def(num_threads=4, device_id=0, seed=1234)
def bri_pipe(data_iterator, dtype, dev='cpu'):
    """Pipeline applying fn.brightness with randomized parameters."""
    brightness, brightness_shift = brightness_params()
    data = fn.external_source(source=data_iterator)
    data = data.gpu() if dev == 'gpu' else data
    return fn.brightness(data, brightness=brightness, brightness_shift=brightness_shift,
                         dtype=dtype)
@pipeline_def(num_threads=4, device_id=0, seed=1234)
def con_pipe(data_iterator, contrast_center, dtype, dev='cpu'):
    """Pipeline applying fn.contrast; a None center falls back to a random one."""
    contrast = contrast_param()
    if contrast_center is None:
        contrast_center = contrast_center_param()
    data = fn.external_source(source=data_iterator)
    data = data.gpu() if dev == 'gpu' else data
    return fn.contrast(data, contrast=contrast, contrast_center=contrast_center, dtype=dtype)
@pipeline_def(num_threads=4, device_id=0, seed=1234)
def bricon_pipe(data_iterator, contrast_center, bri, con, dtype, dev='cpu'):
    """Pipeline applying fn.brightness_contrast with the selected parameter subset."""
    kwargs = {'dtype': dtype}
    if bri:
        kwargs['brightness'], kwargs['brightness_shift'] = brightness_params()
    if con:
        kwargs['contrast'] = contrast_param()
        if contrast_center is None:
            contrast_center = contrast_center_param()
        kwargs['contrast_center'] = contrast_center
    data = fn.external_source(source=data_iterator)
    if dev == 'gpu':
        data = data.gpu()
    # As in the original, neither flag set means no graph output.
    if bri or con:
        return fn.brightness_contrast(data, **kwargs)
@pipeline_def(num_threads=4, device_id=0, seed=1234, exec_pipelined=False, exec_async=False)
def bricon_ref_pipe(data_iterator, contrast_center, dtype, has_3_dims=False):
    """Reference pipeline computing bricon_ref through a python_function operator."""
    brightness, brightness_shift = brightness_params()
    contrast = contrast_param()
    if contrast_center is None:
        contrast_center = contrast_center_param()
    data = fn.external_source(source=data_iterator)
    out_layout = "FHWC" if has_3_dims else "HWC"
    return python_function(data, brightness, brightness_shift, contrast, contrast_center,
                           dali_type_to_np(dtype), function=bricon_ref,
                           output_layouts=out_layout)
def check_equivalence(device, inp_dtype, out_dtype, op, has_3_dims, use_const_contr_center):
    """Check fn.brightness/fn.contrast match fn.brightness_contrast with same args."""
    batch_size = 32
    n_iters = 16
    np_inp_dtype = dali_type_to_np(inp_dtype)
    shape = (random.randint(2, 5), 128, 32, 3) if has_3_dims else (128, 32, 3)
    ri1 = RandomDataIterator(batch_size, shape=shape, dtype=np_inp_dtype)
    ri2 = RandomDataIterator(batch_size, shape=shape, dtype=np_inp_dtype)
    contrast_center = 0.4 * max_range(np_inp_dtype) if use_const_contr_center else None
    bri = op == 'brightness'
    con = op == 'contrast'
    if bri:
        pipe1 = bri_pipe(ri1, out_dtype, device, batch_size=batch_size)
    else:
        pipe1 = con_pipe(ri1, contrast_center, out_dtype, device, batch_size=batch_size)
    pipe2 = bricon_pipe(ri2, contrast_center, bri, con, out_dtype, device,
                        batch_size=batch_size)
    # Float outputs should agree closely; integer outputs may differ by 1 (rounding).
    eps = 1e-4 if out_dtype in [np.half, np.single, np.double] else 1
    compare_pipelines(pipe1, pipe2, batch_size, n_iters, eps=eps)
def test_equivalence():
    """Yield equivalence cases of brightness/contrast vs the fused operator."""
    rng = random.Random(42)
    dtypes = [types.FLOAT, types.INT16, types.UINT8]
    flag_pairs = [(b1, b2) for b1 in [True, False] for b2 in [True, False]]
    for device in ['cpu', 'gpu']:
        for inp_dtype in dtypes:
            for out_dtype in dtypes:
                for op in ['brightness', 'contrast']:
                    # Sample two of the four flag combinations to bound the suite size.
                    for has_3_dims, use_const in rng.sample(flag_pairs, 2):
                        yield (check_equivalence, device, inp_dtype, out_dtype, op,
                               has_3_dims, use_const)
def check_vs_ref(device, inp_dtype, out_dtype, has_3_dims, use_const_contr_center):
    """Compare the fused brightness_contrast operator to the numpy reference pipeline."""
    batch_size = 32
    n_iters = 8
    np_inp_dtype = dali_type_to_np(inp_dtype)
    shape = (random.randint(2, 5), 128, 32, 3) if has_3_dims else (128, 32, 3)
    ri1 = RandomDataIterator(batch_size, shape=shape, dtype=np_inp_dtype)
    ri2 = RandomDataIterator(batch_size, shape=shape, dtype=np_inp_dtype)
    contrast_center = 0.4 * max_range(np_inp_dtype) if use_const_contr_center else None
    pipe1 = bricon_ref_pipe(ri1, contrast_center, out_dtype,
                            has_3_dims=has_3_dims, batch_size=batch_size)
    pipe2 = bricon_pipe(ri2, contrast_center, True, True, out_dtype, device,
                        batch_size=batch_size)
    # Float outputs should agree closely; integer outputs may differ by 1 (rounding).
    eps = 1e-4 if out_dtype in [np.half, np.single, np.double] else 1
    compare_pipelines(pipe1, pipe2, batch_size, n_iters, eps=eps)
def test_vs_ref():
    """Yield operator-vs-reference cases over devices and dtype combinations."""
    rng = random.Random(42)
    dtypes = [types.FLOAT, types.INT16, types.UINT8]
    flag_pairs = [(b1, b2) for b1 in [True, False] for b2 in [True, False]]
    for device in ['cpu', 'gpu']:
        for inp_dtype in dtypes:
            for out_dtype in dtypes:
                # Sample two of the four flag combinations to bound the suite size.
                for has_3_dims, use_const in rng.sample(flag_pairs, 2):
                    yield (check_vs_ref, device, inp_dtype, out_dtype,
                           has_3_dims, use_const)
def test_video():
    """Video (sequence) test-suite cases for brightness/contrast operators."""
    def rng_param(scale):
        # Per-sample random scalar argument in [0, scale).
        def cb(sample_desc):
            return np.float32(scale * sample_desc.rng.random())
        return cb

    brightness = rng_param(2)
    brightness_shift = rng_param(1)
    contrast = rng_param(2)
    contrast_center = rng_param(1)
    video_test_cases = [
        (fn.brightness, {
            'dtype': types.INT32
        }, [ArgCb("brightness", brightness, True)]),
        (fn.brightness, {
            'dtype': types.UINT8
        }, [
            ArgCb("brightness_shift", brightness_shift, True),
            ArgCb("brightness", brightness, False)
        ]),
        (fn.contrast, {
            'dtype': types.FLOAT
        }, [ArgCb("contrast", contrast, True)]),
        (fn.contrast, {
            'dtype': types.FLOAT
        }, [ArgCb("contrast", contrast, True),
            ArgCb("contrast_center", contrast_center, False)]),
        (fn.contrast, {
            'dtype': types.UINT8
        }, [ArgCb("contrast_center", contrast_center, True)]),
        (fn.brightness_contrast, {
            'dtype': types.UINT8
        }, [
            ArgCb("contrast", contrast, False),
            ArgCb("contrast_center", contrast_center, True),
            ArgCb("brightness", brightness, True)
        ]),
        (fn.brightness_contrast, {}, [
            ArgCb("brightness", brightness, True),
            ArgCb("brightness_shift", brightness_shift, True),
            ArgCb("contrast", contrast, True),
            ArgCb("contrast_center", contrast_center, True)
        ]),
    ]
    yield from video_suite_helper(video_test_cases, test_channel_first=False)
|
DALI-main
|
dali/test/python/operator_1/test_brightness_contrast.py
|
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from test_utils import check_batch
from nose_utils import raises
import numpy as np
# Number of distinct classes produced by the OneHot operator in these tests.
num_classes = 20
# Number of samples per batch in every test pipeline below.
batch_size = 10
def insert_as_axis(target, value, axis, total_axes):
    """Return tuple ``target`` with ``value`` inserted at ``axis`` (tail capped at total_axes)."""
    head = target[0:axis]
    tail = target[axis:total_axes]
    return head + (value,) + tail
def get_initial_layout(sample_dim=0, base="ABCD"):
    """Return the first ``sample_dim`` characters of ``base`` as a layout string."""
    return base[:sample_dim]
def modify_layout(layout, output_dim, axis=None, axis_name=None):
    """Compute the layout expected after OneHot inserts a new axis.

    No axis name, or multi-dim output without an input layout, yields "".
    A 1D output is described solely by the new axis name; otherwise the
    name is inserted at ``axis`` (negative means append at the end).
    """
    if not axis_name:
        return ""
    if not layout and output_dim > 1:
        return ""
    if output_dim == 1:
        return axis_name
    layout = layout or ""
    insert_at = len(layout) if axis < 0 else axis
    return layout[:insert_at] + axis_name + layout[insert_at:]
def random_3d_tensors_batch():
    """Batch of int32 3D tensors, random shapes in [2, 8), values in [0, num_classes)."""
    batch = []
    for _ in range(batch_size):
        # Draw the shape first, then the tensor contents (matches the original order).
        shape = np.random.randint(2, 8, size=(3,))
        batch.append(np.random.randint(0, num_classes, size=shape, dtype=np.int32))
    return batch
def random_scalars_batch():
    """1D array of batch_size int32 scalars drawn from [0, num_classes)."""
    scalars = np.random.randint(0, num_classes, size=batch_size, dtype=np.int32)
    return scalars
def random_scalar_like_tensors_batch(nested_level):
    """Batch of single-element int32 tensors shaped (1,) * nested_level."""
    shape = (1, ) * nested_level
    return [np.full(shape, np.random.randint(0, num_classes), dtype=np.int32)
            for _ in range(batch_size)]
class OneHotPipeline(Pipeline):
    """Pipeline feeding an external source through the OneHot operator.

    Returns both the one-hot output and the raw input so the test can build
    the reference from the very data the operator saw.
    """

    def __init__(self, num_classes, source, axis=-1, num_threads=1,
                 layout=None, axis_name=None, device='cpu'):
        super().__init__(batch_size, num_threads, 0)
        self.is_gpu = device == 'gpu'
        self.ext_src = ops.ExternalSource(source=source, layout=layout)
        self.one_hot = ops.OneHot(num_classes=num_classes, axis=axis,
                                  dtype=types.INT32, device=device, axis_name=axis_name)

    def define_graph(self):
        self.data = self.ext_src()
        if self.is_gpu:
            self.data = self.data.gpu()
        return self.one_hot(self.data), self.data
def one_hot_3_axes(input, axis):
    """Reference one-hot encoding for a batch of 3D integer tensors.

    Inserts a new axis of length num_classes at position ``axis`` (negative
    means append after the last axis) and sets a 1 at each element's class
    index, zeros elsewhere.

    Fixes: removed the dead ``shapes`` list the original built but never used;
    replaced the hand-written triple loop with np.ndindex.
    """
    total_axes = len(input[0].shape)
    assert total_axes == 3
    axis = axis if axis >= 0 else total_axes
    results = []
    for i in range(batch_size):
        shape = insert_as_axis(input[i].shape, num_classes, axis, total_axes)
        result = np.zeros(shape, dtype=np.int32)
        # Set the one-hot bit for every element of the input tensor.
        for in_coord in np.ndindex(*input[i].shape):
            out_coord = insert_as_axis(in_coord, input[i][in_coord], axis, total_axes)
            result[out_coord] = 1
        results.append(result)
    return results
def one_hot(input):
    """Reference one-hot for a batch of scalars: shape (batch_size, num_classes)."""
    encoded = np.zeros([batch_size, num_classes], dtype=np.int32)
    for idx in range(batch_size):
        encoded[idx, int(input[idx])] = 1
    return encoded
def check_one_hot_operator(source, device='cpu', axis=-1,
                           expected_output_dim=None, axis_name=None, initial_layout=None):
    """Run OneHotPipeline on ``source`` and compare with the numpy reference."""
    pipeline = OneHotPipeline(num_classes=num_classes, source=source, axis=axis,
                              layout=initial_layout, axis_name=axis_name, device=device)
    pipeline.build()
    outputs, input_batch = pipeline.run()
    if device == 'gpu':
        input_batch = input_batch.as_cpu()
    input_batch = [np.array(sample) for sample in input_batch]
    expected_output_dim = expected_output_dim or len(input_batch[0].shape) + 1
    # 4D output means 3D input → use the per-axis reference, else the scalar one.
    if expected_output_dim == 4:
        reference = one_hot_3_axes(input_batch, axis)
    else:
        reference = one_hot(input_batch)
    expected_layout = modify_layout(initial_layout, expected_output_dim, axis, axis_name)
    check_batch(outputs, reference, batch_size,
                max_allowed_error=0, expected_layout=expected_layout)
def test_one_hot_scalar():
    """One-hot of true scalar inputs on both backends."""
    np.random.seed(42)
    case = partial(check_one_hot_operator, axis_name='O')
    for device in ['cpu', 'gpu']:
        for _ in range(10):
            yield case, random_scalars_batch, device
def test_one_hot_legacy():
    """Legacy behavior: 'multi-dimensional scalars' collapse to a 1D one-hot."""
    np.random.seed(42)
    for device in ['cpu', 'gpu']:
        # 1..4 levels of nesting of single-element 'scalar-like' tensors.
        for nesting in range(1, 5):
            layout = get_initial_layout(nesting)
            # partial binds the nesting level by value, like the original helper class.
            source = partial(random_scalar_like_tensors_batch, nesting)
            for _ in range(5):
                yield partial(check_one_hot_operator, axis=None, axis_name='O',
                              expected_output_dim=1,
                              initial_layout=layout), source, device
def test_one_hot():
    """One-hot of 3D tensors for every valid insertion axis on both backends."""
    np.random.seed(42)
    for device in ['cpu', 'gpu']:
        layout = get_initial_layout(3)
        for _ in range(10):
            for axis in [-1, 0, 1, 2, 3]:
                yield (partial(check_one_hot_operator, axis_name='O', initial_layout=layout),
                       random_3d_tensors_batch, device, axis)
def test_multi_dim_one_hot_no_initial_layout():
    """Without an input layout the multi-dim output layout stays empty."""
    np.random.seed(42)
    case = partial(check_one_hot_operator, initial_layout=None)
    for axis in [-1, 0, 1, 2, 3]:
        yield case, random_3d_tensors_batch, 'cpu', axis
def test_one_hot_reset_layout():
    """Output layout is reset when no axis_name is supplied."""
    np.random.seed(42)
    layout = get_initial_layout(3)
    for axis in [-1, 0, 1, 2, 3]:
        yield (partial(check_one_hot_operator, initial_layout=layout),
               random_3d_tensors_batch, 'cpu', axis)
    yield check_one_hot_operator, random_scalars_batch

    def scalar_like_source():
        return random_scalar_like_tensors_batch(3)

    yield (partial(check_one_hot_operator, axis=None, expected_output_dim=1,
                   initial_layout=layout), scalar_like_source, 'cpu')
def test_one_hot_custom_layout_axis_name():
    """Various single-character axis names (letters and digits) are accepted."""
    np.random.seed(42)
    layout = get_initial_layout(3)
    for axis_name in ['X', 'x', '0', '1']:
        yield (partial(check_one_hot_operator, axis=-1, initial_layout=layout,
                       axis_name=axis_name), random_3d_tensors_batch)
@raises(RuntimeError, glob='Unsupported axis_name value')
def test_too_long_axis_name():
    """A multi-character axis_name must be rejected."""
    np.random.seed(42)
    check_one_hot_operator(random_3d_tensors_batch, axis=-1, initial_layout="ABC",
                           axis_name="CD")
@raises(RuntimeError, glob='Unsupported axis_name value')
def test_empty_string_axis_name():
    """An empty axis_name must be rejected."""
    np.random.seed(42)
    check_one_hot_operator(random_3d_tensors_batch, axis=-1, initial_layout="ABC",
                           axis_name="")
@raises(RuntimeError, glob='Input layout mismatch')
def test_axis_name_no_initial_layout_multi_dim():
    """axis_name without an input layout on multi-dim data must be rejected."""
    np.random.seed(42)
    check_one_hot_operator(random_3d_tensors_batch, axis=-1, axis_name="O")
|
DALI-main
|
dali/test/python/operator_1/test_one_hot.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import os
from nvidia.dali import Pipeline
from test_utils import check_batch
from test_utils import get_dali_extra_path
jpeg_folder = os.path.join(get_dali_extra_path(), 'db', 'single', 'jpeg')
# Array-like constructors to exercise with ConstantOp. Each entry is
# (array_factory, dtype_lookup) where dtype_lookup maps a dtype name string
# to the framework's dtype object; None means "pass the name string as-is".
array_interfaces = [(np.array, None)]
try:
    import torch
    # getattr resolves e.g. "int32" -> torch.int32 without eval() on a
    # constructed string (same result, no code execution).
    array_interfaces.append((torch.tensor, lambda x: getattr(torch, x)))
    print("ConstantOp: PyTorch support enabled")
except ModuleNotFoundError:
    print("ConstantOp: PyTorch support disabled")
try:
    import mxnet
    array_interfaces.append((mxnet.ndarray.array, None))
    print("ConstantOp: MXNet support enabled")
except ModuleNotFoundError:
    print("ConstantOp: MXNet support disabled")
class ConstantPipeline(Pipeline):
    # Graph-API pipeline producing seven constant outputs; the expected
    # values are in the module-level `ref` list (index-aligned).
    def __init__(self, device):
        super().__init__(10, 3, device_id=0, exec_async=True, exec_pipelined=True)
        # float list constant
        self.const1 = ops.Constant(device=device, fdata=(1.25, 2.5, 3))
        # int data reshaped to (2, 1, 2)
        self.const2 = ops.Constant(device=device, idata=(1, 2, 3, 4), shape=(2, 1, 2))
        # int data converted to uint8; `ref` expects the -1 to come out as 0
        # (presumably a saturating cast -- confirm against backend behavior)
        self.const3 = ops.Constant(device=device, idata=(-1, 1, 2, 3, 4), dtype=types.UINT8)
        # float data converted to float16
        self.const4 = ops.Constant(device=device, fdata=(0.25, 1.25, 2.25, 3.25, 4.25),
                                   dtype=types.FLOAT16)
        # scalars broadcast to the requested shapes
        self.const5 = ops.Constant(device=device, fdata=5.5, shape=(100, 100))
        self.const6 = ops.Constant(device=device, idata=-4, shape=(10, 20))
        # int data converted to booleans
        self.const7 = ops.Constant(device=device, idata=[0, 1, 0], dtype=types.BOOL)
    def define_graph(self):
        return self.const1(), self.const2(), self.const3(), self.const4(), self.const5(), \
               self.const6(), self.const7()
class ConstantFnPipeline(Pipeline):
    """Builds the same constants as ConstantPipeline via the functional
    types.Constant API, using a pluggable array factory (numpy/torch/mxnet)."""

    def __init__(self, device, array_interface):
        super().__init__(10, 3, device_id=0, exec_async=True, exec_pipelined=True)
        self.device = device
        array_factory, dtype_lookup = array_interface
        self.array = array_factory
        # Fall back to identity so self.dtype('int32') yields the plain name.
        self.dtype = dtype_lookup if dtype_lookup is not None else (lambda x: x)

    def define_graph(self):
        dev = self.device
        return [
            types.Constant(device=dev, value=(1.25, 2.5, 3)),
            types.Constant(device=dev,
                           value=self.array([[[1, 2]], [[3, 4]]],
                                            dtype=self.dtype('int32'))),
            types.Constant(device=dev,
                           value=self.array([0, 1, 2, 3, 4],
                                            dtype=self.dtype('uint8'))),
            types.Constant(device=dev,
                           value=self.array([0.25, 1.25, 2.25, 3.25, 4.25],
                                            dtype=self.dtype('float16'))),
            types.Constant(device=dev, value=5.5, shape=(100, 100), name="large"),
            types.Constant(device=dev, value=-4, shape=(10, 20)),
            types.Constant(device=dev, value=[False, True, False]),
        ]
class ScalarConstantPipeline(Pipeline):
    """Exercises implicit promotion of scalar/array constants to operator inputs."""

    def __init__(self, device):
        super().__init__(10, 3, device_id=0, exec_async=True, exec_pipelined=True)
        self.device = device

    def define_graph(self):
        dev = self.device
        # no-op: reshape a scalar constant to a 1-element tensor
        scalar = ops.Reshape(device=dev, shape=[1])(types.Constant(1.25))
        # flatten a 2x2 constant, with the target shape itself a constant
        flattened = ops.Reshape(device=dev)(
            types.Constant(np.array([[1, 2], [3, 4]], dtype=np.uint16), device=dev),
            shape=types.Constant([4]))
        return [scalar, flattened]
def check(a1, a2):
    """Assert that two arrays have identical dtype and identical contents.

    Raises AssertionError (same as before) but carries the mismatching
    values in the message instead of relying on a side-effect print.
    """
    assert a1.dtype == a2.dtype, f"dtype mismatch: {a1.dtype} vs {a2.dtype}"
    assert np.array_equal(a1, a2), f"content mismatch:\n{a1}\nvs\n{a2}"
# Expected outputs, index-aligned with the tuples returned by
# ConstantPipeline.define_graph() and ConstantFnPipeline.define_graph().
# NOTE(review): entry 2 expects 0 where ConstantPipeline passes idata=-1 with
# dtype UINT8 -- presumably a saturating cast; confirm against the backend.
ref = [
    np.array([1.25, 2.5, 3], dtype=np.float32),
    np.array([[[1, 2]], [[3, 4]]], dtype=np.int32),
    np.array([0, 1, 2, 3, 4], dtype=np.uint8),
    np.array([0.25, 1.25, 2.25, 3.25, 4.25], dtype=np.float16),
    np.full([100, 100], 5.5, dtype=np.float32),
    np.full([10, 20], -4, dtype=np.int32),
    np.array([False, True, False], dtype=bool)
]
def _test_op(device):
    """Run ConstantPipeline on `device` and compare every output against `ref`."""
    pipe = ConstantPipeline(device)
    pipe.build()
    for _ in range(3):  # renamed from `iter`, which shadowed the builtin
        out = pipe.run()
        if device == "gpu":
            out = [o.as_cpu() for o in out]
        for o in range(len(ref)):
            for i in range(len(out[o])):
                check(out[o].at(i), ref[o])
def _test_func(device, array_interface):
    """Run ConstantFnPipeline with the given array interface; compare against `ref`."""
    pipe = ConstantFnPipeline(device, array_interface)
    pipe.build()
    for _ in range(3):  # renamed from `iter`, which shadowed the builtin
        out = pipe.run()
        if device == "gpu":
            out = [o.as_cpu() for o in out]
        for o in range(len(ref)):
            for i in range(len(out[o])):
                check(out[o].at(i), ref[o])
def _test_scalar_constant_promotion(device):
    """Check that scalar constants are promoted to tensors of the expected shape."""
    pipe = ScalarConstantPipeline(device)
    pipe.build()
    # Renamed from `ref`, which shadowed the module-level reference list.
    expected = [
        np.array([1.25], dtype=np.float32),
        np.array([1, 2, 3, 4], dtype=np.uint16)
    ]
    for _ in range(3):  # renamed from `iter`, which shadowed the builtin
        out = pipe.run()
        if device == "gpu":
            out = [o.as_cpu() for o in out]
        for o in range(len(expected)):
            for i in range(len(out[o])):
                check(out[o].at(i), expected[o])
def test_constant_op():
    """Graph-API constants on both backends."""
    for dev in ("cpu", "gpu"):
        yield _test_op, dev
def test_constant_fn():
    """Functional-API constants for every device x array interface."""
    for dev in ("cpu", "gpu"):
        for iface in array_interfaces:
            yield _test_func, dev, iface
def test_scalar_constant_promotion():
    """Scalar promotion on both backends."""
    for dev in ("cpu", "gpu"):
        yield _test_scalar_constant_promotion, dev
def test_variable_batch():
    """Constants must replicate to the batch size dictated by a variable-batch input."""
    pipe = Pipeline(6, 1, 0)
    batches = [
        [np.array(1), np.array(2)],
        [np.array(1)],
        [np.array(1), np.array(2), np.array(3), np.array(4), np.array(5), np.array(5)]
    ]
    dummy = fn.external_source(batches, cycle=True)
    val = np.float32([[1, 2], [3, 4]])
    pipe.set_outputs(types.Constant(val, device="cpu"), types.Constant(val, device="gpu"), dummy)
    pipe.build()
    for batch in batches:
        out_cpu, out_gpu, _ = pipe.run()
        expected_len = len(batch)
        assert len(out_cpu) == expected_len
        assert len(out_gpu) == expected_len
        out_gpu = out_gpu.as_cpu()
        for i in range(expected_len):
            assert np.array_equal(out_cpu.at(i), val)
            assert np.array_equal(out_gpu.at(i), val)
def test_constant_promotion_mixed():
    """Decoding a constant byte array must match decoding the same file via a reader."""
    filename = os.path.join(jpeg_folder, "241", "cute-4074304_1280.jpg")
    file_contents = np.fromfile(filename, dtype=np.uint8)
    pipe = Pipeline(1, 3, 0)
    with pipe:
        jpegs, _ = fn.readers.file(files=[filename])
        from_reader = fn.decoders.image(jpegs, device="mixed")
        from_constant = fn.decoders.image(file_contents, device="mixed")
        pipe.set_outputs(from_constant, from_reader)
    pipe.build()
    # Outputs arrive in set_outputs() order: (constant, reader). The original
    # unpacked them under swapped names -- harmless only because check_batch
    # compares symmetrically; corrected here for clarity.
    from_constant, from_reader = pipe.run()
    check_batch(from_reader, from_constant, 1)
|
DALI-main
|
dali/test/python/operator_1/test_constant.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali as dali
import nvidia.dali.fn as fn
from nvidia.dali.pipeline import Pipeline
import numpy as np
from test_utils import check_batch
from nose_utils import raises
def _test_permutation_generator(allow_repetitions, no_fixed):
    """Validate fn.batch_permutation under repetition / fixed-point settings."""
    batch_size = 10
    pipe = Pipeline(batch_size, 1, None)
    perm = fn.batch_permutation(allow_repetitions=allow_repetitions, no_fixed_points=no_fixed)
    pipe.set_outputs(perm)
    pipe.build()
    for _ in range(100):  # renamed from `iter`, which shadowed the builtin
        idxs, = pipe.run()
        for i in range(batch_size):
            assert idxs.at(i).shape == ()  # one scalar index per sample
        idxs = [int(idxs.at(i)) for i in range(batch_size)]
        if allow_repetitions:
            assert all(0 <= x < batch_size for x in idxs)
        else:
            # Without repetitions the output must be a permutation of 0..batch_size-1
            assert sorted(idxs) == list(range(batch_size))
        if no_fixed:
            assert all(x != i for i, x in enumerate(idxs))
def test_permutation_generator():
    """Cross product of allow_repetitions x no_fixed_points settings."""
    options = (None, False, True)
    for rep in options:
        for nofix in options:
            yield _test_permutation_generator, rep, nofix
def random_sample():
    """Return a random 3D int array: each extent in [1, 50), values in [-1e6, 1e6)."""
    dims = np.random.randint(1, 50, [3])
    return np.random.randint(-1000000, 1000000, dims)
def gen_data(batch_size, type):
    """Return a batch of `batch_size` random samples cast to `type`.

    Note: the parameter name `type` (shadowing the builtin) is kept for
    caller compatibility.
    """
    samples = (random_sample() for _ in range(batch_size))
    return [s.astype(type) for s in samples]
def _test_permute_batch(device, type):
    """Permute a batch with random indices and compare against a manual gather."""
    batch_size = 10
    pipe = Pipeline(batch_size, 4, 0)
    data = fn.external_source(
        source=lambda: gen_data(batch_size, type),
        device=device, layout="abc")
    perm = fn.batch_permutation()
    pipe.set_outputs(data, fn.permute_batch(data, indices=perm), perm)
    pipe.build()
    for _ in range(10):
        orig, permuted, idxs = pipe.run()
        indices = [int(idxs.at(k)) for k in range(batch_size)]
        if isinstance(orig, dali.backend.TensorListGPU):
            orig = orig.as_cpu()
        reference = [orig.at(idx) for idx in indices]
        check_batch(permuted, reference, len(reference), 0, 0, "abc")
def test_permute_batch():
    """Random-index permutation across dtypes and backends."""
    for dtype in (np.uint8, np.int16, np.uint32, np.int64, np.float32):
        for dev in ("cpu", "gpu"):
            yield _test_permute_batch, dev, dtype
def _test_permute_batch_fixed(device):
    """Permute a batch with a fixed index list and compare against a manual gather."""
    batch_size = 10
    pipe = Pipeline(batch_size, 4, 0)
    data = fn.external_source(
        source=lambda: gen_data(batch_size, np.int16),
        device=device, layout="abc")
    idxs = [4, 8, 0, 6, 3, 5, 2, 9, 7, 1]
    pipe.set_outputs(data, fn.permute_batch(data, indices=idxs))
    pipe.build()
    for _ in range(10):
        orig, permuted = pipe.run()
        if isinstance(orig, dali.backend.TensorListGPU):
            orig = orig.as_cpu()
        reference = [orig.at(idx) for idx in idxs]
        check_batch(permuted, reference, len(reference), 0, 0, "abc")
def test_permute_batch_fixed():
    """Fixed-index permutation on both backends."""
    for dev in ("cpu", "gpu"):
        yield _test_permute_batch_fixed, dev
@raises(RuntimeError,
        glob="Sample index out of range. * is not a valid index for an input batch of * tensors.")
def _test_permute_batch_out_of_range(device):
    """Index 10 is outside a 10-sample batch and must raise at run time."""
    batch_size = 10
    pipe = Pipeline(batch_size, 4, 0)
    data = fn.external_source(
        source=lambda: gen_data(batch_size, np.int32),
        device=device, layout="abc")
    perm = fn.batch_permutation()
    bad_indices = [0, 1, 2, 3, 4, 5, 10, 7, 8, 9]
    pipe.set_outputs(data, fn.permute_batch(data, indices=bad_indices), perm)
    pipe.build()
    pipe.run()
def test_permute_batch_out_of_range():
    """Out-of-range indices rejected on both backends."""
    for dev in ("cpu", "gpu"):
        yield _test_permute_batch_out_of_range, dev
|
DALI-main
|
dali/test/python/operator_1/test_batch_permute.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import numpy as np
import os
from test_utils import compare_pipelines
from test_utils import RandomDataIterator
from test_utils import get_dali_extra_path
test_data_root = get_dali_extra_path()
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
class FlipPipeline(Pipeline):
    """Reads images from a Caffe LMDB, decodes them and flips on the chosen backend."""

    def __init__(self, device, batch_size, num_threads=1, device_id=0, num_gpus=1,
                 is_vertical=0, is_horizontal=1):
        super().__init__(batch_size, num_threads, device_id)
        self.device = device
        self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
                                       num_shards=num_gpus)
        self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
        self.flip = ops.Flip(device=self.device, vertical=is_vertical,
                             horizontal=is_horizontal)

    def define_graph(self):
        inputs, labels = self.input(name="Reader")
        images = self.decode(inputs)
        if self.device == 'gpu':
            images = images.gpu()
        return self.flip(images)
class SynthFlipPipeline(Pipeline):
    """Feeds synthetic data and flips it with per-sample random flags."""

    def __init__(self, batch_size, layout, data_iterator, device):
        super().__init__(batch_size, seed=1234, num_threads=4, device_id=0)
        self.device = device
        self.iterator = data_iterator
        self.layout = layout
        self.input = ops.ExternalSource()
        self.coin = ops.random.CoinFlip(seed=1234)
        self.flip = ops.Flip(device=device)

    def define_graph(self):
        self.data = self.input()
        to_flip = self.data.gpu() if self.device == 'gpu' else self.data
        # Keyword argument order matters: coin() draws are consumed left-to-right.
        return self.flip(to_flip, horizontal=self.coin(), vertical=self.coin(),
                         depthwise=self.coin())

    def iter_setup(self):
        self.feed_input(self.data, self.iterator.next(), layout=self.layout)
def numpy_flip(data, h_dim, v_dim, d_dim, hor, ver, depth):
    """Flip `data` along each enabled axis; an axis index < 0 disables that axis."""
    for axis, enabled in ((h_dim, hor), (v_dim, ver), (d_dim, depth)):
        if axis >= 0 and enabled:
            data = np.flip(data, axis)
    return data
def find_dims(layout):
    """Return the (W, H, D) indices within `layout`; -1 for an absent dimension."""
    return tuple(layout.find(dim) for dim in "WHD")
class SynthPythonFlipPipeline(Pipeline):
    """Reference pipeline that flips via a python function (numpy_flip)."""

    def __init__(self, batch_size, layout, data_iterator):
        super().__init__(batch_size, seed=1234, num_threads=4, device_id=0,
                         exec_async=False, exec_pipelined=False)
        self.iterator = data_iterator
        self.layout = layout
        self.input = ops.ExternalSource()
        self.coin = ops.random.CoinFlip(seed=1234)
        h_dim, v_dim, d_dim = find_dims(layout)

        def flip_fn(d, hor, ver, depth):
            return numpy_flip(d, h_dim, v_dim, d_dim, hor, ver, depth)

        self.python_flip = ops.PythonFunction(function=flip_fn, output_layouts=layout)

    def define_graph(self):
        self.data = self.input()
        return self.python_flip(self.data, self.coin(), self.coin(), self.coin())

    def iter_setup(self):
        self.feed_input(self.data, self.iterator.next(), layout=self.layout)
def check_flip(batch_size, layout, shape, device):
    """Compare DALI flip with the numpy-based reference for one configuration."""
    iterators = [RandomDataIterator(batch_size, shape=shape) for _ in range(2)]
    dali_pipe = SynthFlipPipeline(batch_size, layout, iter(iterators[0]), device)
    ref_pipe = SynthPythonFlipPipeline(batch_size, layout, iter(iterators[1]))
    compare_pipelines(dali_pipe, ref_pipe, batch_size=batch_size, N_iterations=3)
def test_flip_vs_numpy():
    """Sweep layouts, shapes, devices and batch sizes for flip correctness."""
    cases = [("HWC", (15, 20, 3)),
             ("CHW", (4, 20, 25)),
             ("DHWC", (10, 20, 30, 2)),
             ("CDHW", (2, 5, 10, 15)),
             ("FHWC", (3, 90, 120, 3)),
             ("FCHW", (4, 3, 100, 150)),
             ("FDHWC", (4, 20, 50, 30, 3)),
             ("FCDHW", (3, 3, 20, 50, 30))]
    for batch_size in (1, 8, 32):
        for device in ('cpu', 'gpu'):
            for layout, shape in cases:
                yield check_flip, batch_size, layout, shape, device
|
DALI-main
|
dali/test/python/operator_1/test_flip.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.fn as fn
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.types as types
import os
from test_utils import get_dali_extra_path, dali_type_to_np
test_data_root = get_dali_extra_path()
path = 'db/single'
file_types = {'jpeg', 'mixed', 'png', 'tiff', 'pnm', 'bmp', 'jpeg2k'}
def run_decode(data_path, out_type):
    """Decode every image under `data_path` and check that the decoded shape
    agrees with both fn.shapes and fn.peek_image_shape (including the dtype
    requested from peek_image_shape)."""
    batch_size = 4
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    # renamed from `input`, which shadowed the builtin
    encoded, _ = fn.readers.file(file_root=data_path, shard_id=0, num_shards=1, name="reader")
    decoded = fn.decoders.image(encoded, output_type=types.RGB)
    decoded_shape = fn.shapes(decoded)
    raw_shape = fn.peek_image_shape(encoded, dtype=out_type)
    pipe.set_outputs(decoded, decoded_shape, raw_shape)
    pipe.build()
    samples = 0
    length = pipe.reader_meta(name="reader")['epoch_size']
    while samples < length:
        samples += batch_size
        (images, decoded_shape, raw_shape) = pipe.run()
        for i in range(batch_size):
            # as we are asking for a particular color space it may
            # differ from the source image, so don't compare the channel dim
            image = images.at(i)
            shape_type = np.int64 if out_type is None else dali_type_to_np(out_type)
            for d in range(len(image.shape) - 1):
                assert image.shape[d] == decoded_shape.at(i)[d], \
                    "{} vs {}".format(image.shape[d], decoded_shape.at(i)[d])
                assert image.shape[d] == raw_shape.at(i)[d], \
                    "{} vs {}".format(image.shape[d], raw_shape.at(i)[d])
                # BUGFIX: the failure message referenced `.dtyp` (AttributeError
                # raised instead of the intended assertion message)
                assert raw_shape.at(i)[d].dtype == shape_type, \
                    "{} vs {}".format(raw_shape.at(i)[d].dtype, shape_type)
# Output dtypes requested from peek_image_shape; None exercises the default
# (the check in run_decode expects int64 in that case).
test_types = [None,
              types.INT32, types.UINT32,
              types.INT64, types.UINT64,
              types.FLOAT, types.FLOAT64]
def test_operator_peek_image_shape():
    """Exercise peek_image_shape for every image format and output dtype."""
    for img_type in file_types:
        for out_type in test_types:
            yield run_decode, os.path.join(test_data_root, path, img_type), out_type
|
DALI-main
|
dali/test/python/operator_1/test_peek_image_shape.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali as dali
from nvidia.dali.backend_impl import TensorListGPU
from nvidia.dali.pipeline import Pipeline
def random_shape(max_shape, diff=100):
    """Random shape close to `max_shape`: each extent in [s - diff, s).

    Every extent of `max_shape` must exceed `diff`.
    """
    extents = []
    for s in max_shape:
        assert s > diff
        extents.append(np.random.randint(s - diff, s))
    return np.array(extents, dtype=np.int32)
def check_coin_flip(device='cpu', batch_size=32, max_shape=[1e5], p=None,
                    use_shape_like_input=False):
    # Runs fn.random.coin_flip with the shape given either as an argument or
    # inferred from a shape-like input; checks the outputs are 0/1 and that the
    # fraction of ones matches `p` (default 0.5).
    # NOTE: `max_shape=[1e5]` is a mutable default -- safe here because it is
    # never mutated.
    pipe = Pipeline(batch_size=batch_size, device_id=0, num_threads=3, seed=123456)
    with pipe:
        def shape_gen_f(): return random_shape(max_shape)
        shape_arg = None
        inputs = []
        shape_out = None
        if max_shape is not None:
            if use_shape_like_input:
                # Shape comes implicitly from a zero-filled input tensor.
                shape_like_in = dali.fn.external_source(lambda: np.zeros(shape_gen_f()),
                                                        device=device, batch=False)
                inputs += [shape_like_in]
                shape_out = dali.fn.shapes(shape_like_in)
            else:
                # Shape passed explicitly as a per-sample argument input.
                shape_arg = dali.fn.external_source(shape_gen_f, batch=False)
                shape_out = shape_arg
        outputs = [dali.fn.random.coin_flip(*inputs, device=device, probability=p, shape=shape_arg)]
        if shape_out is not None:
            outputs += [shape_out]
        pipe.set_outputs(*outputs)
    pipe.build()
    outputs = pipe.run()
    data_out = outputs[0].as_cpu() if isinstance(outputs[0], TensorListGPU) else outputs[0]
    shapes_out = None
    if max_shape is not None:
        shapes_out = outputs[1].as_cpu() if isinstance(outputs[1], TensorListGPU) else outputs[1]
    p = p if p is not None else 0.5
    for i in range(batch_size):
        data = np.array(data_out[i])
        assert np.logical_or(data == 0, data == 1).all()
        if max_shape is not None:
            sample_shape = np.array(shapes_out[i])
            assert (data.shape == sample_shape).all()
            # NOTE(review): len(data) is the first extent only; correct for the
            # 1-D shapes used by test_coin_flip, but not for multi-dim samples.
            total = len(data)
            positive = np.count_nonzero(data)
            np.testing.assert_allclose(p, positive / total, atol=0.005)  # +/- 0.5%
def test_coin_flip():
    """Sweep devices, shape sources and probabilities (incl. degenerate 0 and 1)."""
    batch_size = 8
    shape_cases = (([100000], False), ([100000], True), (None, False))
    for device in ('cpu', 'gpu'):
        for max_shape, use_shape_like_in in shape_cases:
            for probability in (None, 0.7, 0.5, 0.0, 1.0):
                yield check_coin_flip, device, batch_size, max_shape, probability, \
                    use_shape_like_in
|
DALI-main
|
dali/test/python/operator_1/test_coin_flip.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
from nvidia.dali.backend_impl import TensorListGPU
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import numpy as np
import scipy.stats as st
import random
test_types = [types.INT8, types.INT16, types.INT32, types.FLOAT, types.FLOAT64]
def random_shape(max_shape):
    """Random shape with each extent drawn from [1, s); extents equal to 1 stay 1."""
    extents = [1 if s == 1 else np.random.randint(1, s) for s in max_shape]
    return np.array(extents, dtype=np.int32)
def random_shape_or_empty(max_shape):
    """Either the fixed empty-sample shape (200, 0, 3) or a random non-empty one."""
    if random.choice([True, False]):
        return np.array([200, 0, 3], np.int32)
    return np.array(
        [1 if s == 1 else np.random.randint(1, s) for s in max_shape],
        dtype=np.int32
    )
def check_normal_distribution(device, dtype, shape=None, use_shape_like_input=False,
                              variable_shape=False, mean=0.0, stddev=1.0,
                              variable_dist_params=False, shape_gen_f=None,
                              niter=3, batch_size=3, device_id=0, num_threads=3):
    # Builds a pipeline around fn.random.normal with the shape given either as
    # a fixed value, a per-iteration generator, or inferred from a shape-like
    # input; optionally randomizes mean/stddev per sample, then sanity-checks
    # the output distribution (68-95-99.7 rule + Anderson-Darling).
    pipe = Pipeline(batch_size=batch_size, device_id=device_id,
                    num_threads=num_threads, seed=123456)
    with pipe:
        shape_like_in = None
        shape_arg = None
        # `shape` and `shape_gen_f` are mutually exclusive.
        assert shape is None or shape_gen_f is None
        if variable_shape:
            if shape_gen_f is None:
                def shape_gen_f(): return random_shape(shape)
            if use_shape_like_input:
                # Shape inferred from a zero-filled external input.
                shape_like_in = fn.external_source(lambda: np.zeros(shape_gen_f()),
                                                   device=device, batch=False)
                shape_out = fn.shapes(shape_like_in)
            else:
                # Shape fed explicitly as a per-sample argument input.
                shape_arg = fn.external_source(shape_gen_f, batch=False)
                shape_out = shape_arg
        else:
            if use_shape_like_input:
                shape_like_in = np.zeros(shape)
            else:
                shape_arg = shape
            # Can't make an empty list constant
            shape_out = types.Constant(
                shape if shape is not None and shape != () else (1,),
                dtype=types.INT32
            )
        mean_arg = None
        stddev_arg = None
        if variable_dist_params:
            # Per-sample random distribution parameters.
            mean_arg = fn.external_source(
                lambda: np.array(np.random.uniform(low=-100.0, high=100.0), dtype=np.float32),
                device='cpu', batch=False
            )
            stddev_arg = fn.external_source(
                lambda: np.array(np.random.uniform(low=1.0, high=100.0), dtype=np.float32),
                device='cpu', batch=False
            )
        else:
            mean_arg = mean
            stddev_arg = stddev
        inputs = [shape_like_in] if shape_like_in is not None else []
        out = fn.random.normal(
            *inputs, device=device, shape=shape_arg, mean=mean_arg, stddev=stddev_arg, dtype=dtype
        )
        pipe.set_outputs(out, shape_out, mean_arg, stddev_arg)
    pipe.build()
    for i in range(niter):
        outputs = pipe.run()
        # NOTE: the generator below rebinds `i`; the outer loop value is unused
        # after this point.
        out, shapes, means, stddevs = tuple(
            outputs[i].as_cpu() if isinstance(outputs[i], TensorListGPU)
            else outputs[i] for i in range(len(outputs))
        )
        for sample_idx in range(batch_size):
            sample = np.array(out[sample_idx])
            if sample.shape == ():
                continue
            sample_shape = np.array(shapes[sample_idx])
            mean = np.array(means[sample_idx])
            stddev = np.array(stddevs[sample_idx])
            assert (sample.shape == sample_shape).all(), f"{sample.shape} != {sample_shape}"
            data = sample.flatten()
            data_len = len(data)
            # Checking sanity of the data
            if data_len >= 100 and dtype in [types.FLOAT, types.FLOAT64]:
                # Empirical rule:
                # ~68% of the observations within one standard deviation
                # ~95% of the observations within two standard deviations
                # ~99.7% of the observations within three standard deviations
                within_1stddevs = np.where((data > (mean - 1 * stddev)) &
                                           (data < (mean + 1 * stddev)))
                p1 = len(within_1stddevs[0]) / data_len
                within_2stddevs = np.where((data > (mean - 2 * stddev)) &
                                           (data < (mean + 2 * stddev)))
                p2 = len(within_2stddevs[0]) / data_len
                within_3stddevs = np.where((data > (mean - 3 * stddev)) &
                                           (data < (mean + 3 * stddev)))
                p3 = len(within_3stddevs[0]) / data_len
                assert p3 > 0.9, f"{p3}"  # leave some room
                assert p2 > 0.8, f"{p2}"  # leave some room
                assert p1 > 0.5, f"{p1}"  # leave some room
                # It's not 100% mathematically correct, but makes do in case of this test
                _, pvalues_anderson, _ = st.anderson(data, dist='norm')
                assert pvalues_anderson[2] > 0.5
def test_normal_distribution():
    """Sweep devices, dtypes, distribution params and shapes for fn.random.normal."""
    niter = 3
    batch_size = 3
    for device in ("cpu", "gpu"):
        for dtype in test_types:
            for mean, stddev, variable_dist_params in \
                    [(0.0, 1.0, False), (111.0, 57.0, False), (0.0, 0.0, True)]:
                for shape in [(100,), (10, 20, 30), (1, 2, 3, 4, 5, 6)]:
                    use_shape_like_in = False if shape is None else random.choice([True, False])
                    variable_shape = random.choice([True, False])
                    shape_arg = None
                    shape_gen_f = None
                    if variable_shape:
                        # BUGFIX: bind `shape` as a default argument. The yielded
                        # cases run only after this loop has finished, so a plain
                        # closure would see only the *last* value of `shape`
                        # (classic late-binding bug).
                        def shape_gen_f(shape=shape):
                            return random_shape(shape)
                    else:
                        shape_arg = shape
                    yield check_normal_distribution, \
                        device, dtype, shape_arg, use_shape_like_in, variable_shape, \
                        mean, stddev, variable_dist_params, shape_gen_f, niter, batch_size
def test_normal_distribution_scalar_and_one_elem():
    """Scalar (shape None / ()) and single-element shapes must be supported."""
    mean, stddev = 100.0, 20.0
    niter, batch_size = 3, 3
    for device in ("cpu", "gpu"):
        for dtype in (types.FLOAT, types.INT16):
            for shape in (None, (), (1,)):
                yield check_normal_distribution, device, dtype, shape, False, False, \
                    mean, stddev, False, None, niter, batch_size
def test_normal_distribution_empty_shapes():
    """Zero-volume samples should be handled without errors."""
    mean, stddev = 100.0, 20.0
    niter, batch_size = 3, 20
    max_shape = (200, 300, 3)
    for device in ("cpu", "gpu"):
        # Explicitly empty shape.
        yield check_normal_distribution, device, types.FLOAT, (0,), False, False, \
            mean, stddev, False, None, niter, batch_size
        # Randomly empty or non-empty per-sample shapes.
        yield check_normal_distribution, device, types.FLOAT, None, False, False, \
            mean, stddev, False, lambda: random_shape_or_empty(max_shape), niter, batch_size
|
DALI-main
|
dali/test/python/operator_1/test_normal_distribution.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import random
import numpy as np
import os
from test_utils import get_dali_extra_path
from test_noise_utils import PSNR
test_data_root = get_dali_extra_path()
images_dir = os.path.join(test_data_root, 'db', 'single', 'png')
dump_images = False
def shot_noise_ref(x, factor):
    """Numpy reference for shot noise: Poisson-resample x/factor, rescale, clip to uint8."""
    scaled = np.array(x, dtype=np.float32) / factor
    noisy = np.random.poisson(scaled) * factor
    return np.clip(noisy, 0, 255).astype(np.uint8)
@pipeline_def
def pipe_shot_noise(factor, device='cpu'):
    # Decodes PNGs and applies shot noise; returns (input, noisy output, factor).
    encoded, _ = fn.readers.file(file_root=images_dir)
    in_data = fn.decoders.image(encoded, device="cpu", output_type=types.RGB)
    if device == 'gpu':
        in_data = in_data.gpu()
    # NOTE: `or` means any falsy factor (None, but also 0 / 0.0) selects a
    # per-sample random factor from [0.1, 100).
    factor_arg = factor or fn.random.uniform(range=(0.1, 100.0))
    out_data = fn.noise.shot(in_data, factor=factor_arg)
    return in_data, out_data, factor_arg
def _testimpl_operator_noise_shot(device, factor, batch_size, niter):
    """Compare fn.noise.shot against the numpy reference via PSNR."""
    pipe = pipe_shot_noise(factor, device=device, batch_size=batch_size,
                           num_threads=3, device_id=0, seed=12345)
    pipe.build()
    for _ in range(niter):
        # The pipeline returns (input, output, factor). The original code
        # unpacked them under swapped names, which only worked because the
        # per-sample variables were swapped again below; fixed for clarity.
        in_data, out_data, factor_arg = pipe.run()
        factor_arg = factor_arg.as_array()
        if device == 'gpu':
            out_data = out_data.as_cpu()
            in_data = in_data.as_cpu()
        for s in range(batch_size):
            sample_in = np.array(in_data[s])
            sample_out = np.array(out_data[s])
            factor = factor_arg[s]
            sample_ref = shot_noise_ref(sample_in, factor)
            # Noise is random, so compare distortion levels, not pixels.
            psnr_out = PSNR(sample_out, sample_in)
            psnr_ref = PSNR(sample_ref, sample_in)
            np.testing.assert_allclose(psnr_out, psnr_ref, atol=1)
            if dump_images:
                import cv2
                cv2.imwrite(f"./shotnoise_ref_p{factor}_s{s}.png",
                            cv2.cvtColor(sample_ref, cv2.COLOR_BGR2RGB))
                cv2.imwrite(f"./shotnoise_out_p{factor}_s{s}.png",
                            cv2.cvtColor(sample_out, cv2.COLOR_BGR2RGB))
def test_operator_noise_shot():
    """Sweep devices and factors; None selects a per-sample random factor."""
    for device in ("cpu", "gpu"):
        for factor in (None, 0.2, 4, 21.25, 85):
            yield _testimpl_operator_noise_shot, device, factor, random.choice([1, 3]), 3
|
DALI-main
|
dali/test/python/operator_1/test_noise_shot.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import numpy as np
import math
import os
import cv2
import itertools
from nose2.tools import params
from test_utils import get_dali_extra_path
data_root = get_dali_extra_path()
img_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
def get_pipeline(device, batch_size, tile, ratio, angle):
    """Pipeline returning (grid-masked image, original image) for fixed parameters."""
    pipe = Pipeline(batch_size, 4, 0)
    with pipe:
        encoded, _ = fn.readers.file(file_root=img_dir)
        images = fn.decoders.image(encoded, device='cpu', output_type=types.RGB)
        if device == 'gpu':
            images = images.gpu()
        masked = fn.grid_mask(images, device=device, tile=tile, ratio=ratio, angle=angle)
        pipe.set_outputs(masked, images)
    return pipe
def get_random_pipeline(device, batch_size):
    """Pipeline applying grid_mask with per-sample random tile/ratio/angle;
    returns (masked, original, tile, ratio, angle)."""
    pipe = Pipeline(batch_size, 4, 0)
    with pipe:
        encoded, _ = fn.readers.file(file_root=img_dir)
        images = fn.decoders.image(encoded, device='cpu', output_type=types.RGB)
        if device == 'gpu':
            images = images.gpu()
        # Random ops created in the same order as the original (seed derivation).
        tile = fn.cast(fn.random.uniform(range=(50, 200)), dtype=types.INT32)
        ratio = fn.random.uniform(range=(0.3, 0.7))
        angle = fn.random.uniform(range=(-math.pi, math.pi))
        masked = fn.grid_mask(images, device=device, tile=tile, ratio=ratio, angle=angle)
        pipe.set_outputs(masked, images, tile, ratio, angle)
    return pipe
def get_mask(w, h, tile, ratio, angle, d):
    """Boolean (h, w) mask: True for pixels outside the rotated grid squares.

    `d` grows (d > 0) or shrinks (d < 0) the squares, used to build tolerance
    bands around the square borders.
    """
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    square = tile * ratio
    xs = np.tile(np.arange(w), (h, 1))                  # column index per pixel
    ys = np.transpose(np.tile(np.arange(h), (w, 1)))    # row index per pixel
    # Rotate pixel coordinates into the grid's frame.
    rx = xs * cos_a - ys * sin_a
    ry = xs * sin_a + ys * cos_a
    return np.logical_or((rx + d) % tile > square + 2 * d,
                         (ry + d) % tile > square + 2 * d)
def check(result, input, tile, ratio, angle):
    """Verify grid_mask output: black inside the grid squares, untouched outside."""
    result = np.uint8(result)
    input = np.uint8(input)
    h, w = result.shape[0], result.shape[1]
    eps = 0.1
    # Slightly shrunken squares must be completely black in the result.
    inside = np.uint8(1 - get_mask(w, h, tile, ratio, angle, -eps))
    assert not np.any(cv2.bitwise_and(result, result, mask=inside))
    # Outside slightly grown squares, the result must equal the input.
    outside = np.uint8(get_mask(w, h, tile, ratio, angle, eps))
    masked_result = cv2.bitwise_and(result, result, mask=outside)
    masked_input = cv2.bitwise_and(input, input, mask=outside)
    assert np.all(masked_result == masked_input)
def run_test(batch_size, device, tile, ratio, angle):
    """Run one fixed-parameter pipeline iteration and check every sample."""
    pipe = get_pipeline(device, batch_size, tile, ratio, angle)
    pipe.build()
    results, inputs = pipe.run()
    if device == 'gpu':
        results = results.as_cpu()
        inputs = inputs.as_cpu()
    for idx in range(batch_size):
        check(results[idx], inputs[idx], tile, ratio, angle)
# Devices and (tile, ratio, angle) parameter sets for the @params tests below:
# a mix of axis-aligned (angle 0), right-angle and arbitrary rotations.
devices = ['cpu', 'gpu']
args = [
    (40, 0.5, 0),
    (100, 0.1, math.pi / 2),
    (200, 0.7, math.pi / 3),
    (150, 1 / 3, math.pi / 4),
    (50, 0.532, 1),
    (51, 0.38158387, 2.6810782),
    (123, 0.456, 0.789)]
@params(*itertools.product(devices, args))
def test_gridmask_vs_cv(device, args):
    """grid_mask vs the OpenCV-based reference for fixed parameters."""
    tile, ratio, angle = args
    run_test(4, device, tile, ratio, angle)
def run_random_test(batch_size, device):
    """Run the random-parameter pipeline for 16 iterations, checking every sample."""
    pipe = get_random_pipeline(device, batch_size)
    pipe.build()
    for _ in range(16):
        results, inputs, tiles, ratios, angles = pipe.run()
        if device == 'gpu':
            results = results.as_cpu()
            inputs = inputs.as_cpu()
        for idx in range(batch_size):
            check(results[idx], inputs[idx],
                  np.int32(tiles[idx]), np.float32(ratios[idx]), np.float32(angles[idx]))
@params(*devices)
def test_gridmask_vs_cv_random(device):
    """grid_mask vs the OpenCV-based reference with randomized parameters."""
    run_random_test(4, device)
|
DALI-main
|
dali/test/python/operator_1/test_gridmask.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numba
import numpy as np
import os
from nvidia.dali import pipeline_def
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.types as dali_types
import platform
from nose import SkipTest, with_setup
from test_utils import get_dali_extra_path, to_array
from nvidia.dali.plugin.numba.fn.experimental import numba_function
from distutils.version import LooseVersion
from numba import cuda
# Root of the DALI_extra test-data checkout.
test_data_root = get_dali_extra_path()
# Caffe LMDB database consumed by the image-based tests below.
lmdb_folder = os.path.join(test_data_root, 'db', 'lmdb')
def check_env_compatibility():
    """Skip tests on configurations known to be broken.

    At present (as of Numba 0.57) there's a bug in the LLVM JIT linker that
    makes the tests fail randomly on 64-bit ARM platforms.

    Numba bug:
    https://github.com/numba/numba/issues/8567

    TODO(michalz): Update the Numba version range when there's a fix - or possibly check
    llvmlite directly (if still applicable)

    Raises:
        SkipTest: on 64-bit ARM with Numba >= 0.57.0.
    """
    if platform.processor().lower() not in ('arm64', 'aarch64', 'armv8'):
        return
    # Parse the leading numeric components of the version string instead of
    # using distutils.version.LooseVersion: distutils is deprecated (PEP 632)
    # and removed from the standard library in Python 3.12.
    import re
    numba_version = tuple(int(part) for part in re.findall(r'\d+', numba.__version__)[:3])
    if numba_version >= (0, 57, 0):
        raise SkipTest()
def set_all_values_to_255_batch(out0, in0):
    """Batch-processing run function: fill the first output sample with 255."""
    sample = out0[0]
    sample[:] = 255
def set_all_values_to_255_sample(out0, in0):
    """Per-sample run function: fill the whole output sample with 255."""
    white = 255
    out0[:] = white
def set_all_values_to_255_sample_gpu(out0, in0):
    # CUDA per-sample run function: each thread walks the 3D output in a
    # grid-stride loop (start at its grid index, step by the grid size) and
    # writes 255 to its elements.
    tx, ty, tz = cuda.grid(3)
    x_s, y_s, z_s = cuda.gridsize(3)
    for z in range(tz, out0.shape[0], z_s):
        for y in range(ty, out0.shape[1], y_s):
            for x in range(tx, out0.shape[2], x_s):
                out0[z][y][x] = 255
def set_all_values_to_float_batch(out0, in0):
    """Batch-processing run function: fill the first output sample with 0.5."""
    sample = out0[0]
    sample[:] = 0.5
def set_all_values_to_float_sample(out0, in0):
    """Per-sample run function: fill the whole output sample with 0.5."""
    value = 0.5
    out0[:] = value
def set_all_values_to_float_sample_gpu(out0, in0):
    # CUDA per-sample run function: grid-stride loop over the 3D output,
    # writing 0.5 to every element.
    tx, ty, tz = cuda.grid(3)
    x_s, y_s, z_s = cuda.gridsize(3)
    for z in range(tz, out0.shape[0], z_s):
        for y in range(ty, out0.shape[1], y_s):
            for x in range(tx, out0.shape[2], x_s):
                out0[z][y][x] = 0.5
def setup_change_out_shape(out_shape, in_shape):
    """Setup function: each output sample shape is the input shape permuted by (1, 2, 0)."""
    out0_shape = out_shape[0]
    in0_shape = in_shape[0]
    for idx in range(len(out0_shape)):
        # Unrolled permutation: (d0, d1, d2) -> (d1, d2, d0).
        out0_shape[idx][0] = in0_shape[idx][1]
        out0_shape[idx][1] = in0_shape[idx][2]
        out0_shape[idx][2] = in0_shape[idx][0]
def change_out_shape_batch(out0, in0):
    """Batch run function: fill every output sample with 42."""
    for idx in range(len(out0)):
        sample = out0[idx]
        sample[:] = 42
def change_out_shape_sample(out0, in0):
    """Per-sample run function: fill the output sample with 42."""
    fill_value = 42
    out0[:] = fill_value
def change_out_shape_sample_gpu(out0, in0):
    # CUDA per-sample run function: grid-stride loop over the 3D output,
    # writing 42 everywhere (output shape differs from input via the setup fn).
    tx, ty, tz = cuda.grid(3)
    x_s, y_s, z_s = cuda.gridsize(3)
    for z in range(tz, out0.shape[0], z_s):
        for y in range(ty, out0.shape[1], y_s):
            for x in range(tx, out0.shape[2], x_s):
                out0[z][y][x] = 42
# in shape [x] -> out shape [2, 2, 2, x]
def change_ndim_setup(outs_shape, ins_shapes):
    """Setup function: expand each 1D input of length x to a (2, 2, 2, x) output."""
    out_shape = outs_shape[0]
    in_shape = ins_shapes[0]
    for idx in range(len(out_shape)):
        out_shape[idx][0] = 2
        out_shape[idx][1] = 2
        out_shape[idx][2] = 2
        out_shape[idx][3] = in_shape[idx][0]
def change_ndim_gpu(out0, in0):
    # CUDA run function for the ndim-changing case: the 2D grid is linearized
    # into a single thread id, which strides over the innermost axis of the
    # (2, 2, 2, x) output and stores the element's own index.
    tx, ty = cuda.grid(2)
    x_s, y_s = cuda.gridsize(2)
    tid = ty * x_s + tx
    for i in range(2):
        for j in range(2):
            for k in range(2):
                for x in range(tid, out0.shape[3], x_s * y_s):
                    out0[i][j][k][x] = x
def change_dim_expected_out(d):
    """Expected change_ndim_gpu output: 0..d-1 repeated over a (2, 2, 2, d) array."""
    return np.tile(np.arange(d), 8).reshape(2, 2, 2, d)
def get_data(shapes, dtype):
    """Return a batch of uninitialized arrays, one per requested shape."""
    batch = []
    for shape in shapes:
        batch.append(np.empty(shape, dtype=dtype))
    return batch
def get_data_zeros(shapes, dtype):
    """Return a batch of zero-filled arrays, one per requested shape."""
    batch = []
    for shape in shapes:
        batch.append(np.zeros(shape, dtype=dtype))
    return batch
@pipeline_def
def numba_func_pipe(shapes, dtype, device="cpu", run_fn=None, out_types=None, in_types=None,
                    outs_ndim=None, ins_ndim=None, setup_fn=None, batch_processing=None,
                    blocks=None, threads_per_block=None):
    # Minimal pipeline: external source feeding uninitialized arrays of the
    # given shapes/dtype into a single numba_function configured by the caller.
    data = fn.external_source(lambda: get_data(shapes, dtype), batch=True, device=device)
    return numba_function(
        data, run_fn=run_fn, out_types=out_types, in_types=in_types,
        outs_ndim=outs_ndim, ins_ndim=ins_ndim, setup_fn=setup_fn,
        batch_processing=batch_processing, device=device,
        blocks=blocks, threads_per_block=threads_per_block)
def _testimpl_numba_func(device, shapes, dtype, run_fn, out_types, in_types,
                         outs_ndim, ins_ndim, setup_fn, batch_processing, expected_out,
                         blocks=None, threads_per_block=None):
    # Build numba_func_pipe with the given configuration, run it three times
    # and compare every output sample against expected_out.
    batch_size = len(shapes)
    pipe = numba_func_pipe(
        batch_size=batch_size, num_threads=1, device_id=0,
        shapes=shapes, dtype=dtype, device=device,
        run_fn=run_fn, setup_fn=setup_fn, out_types=out_types,
        in_types=in_types, outs_ndim=outs_ndim, ins_ndim=ins_ndim,
        batch_processing=batch_processing, blocks=blocks, threads_per_block=threads_per_block)
    pipe.build()
    for it in range(3):
        outs = pipe.run()
        for i in range(batch_size):
            out_arr = to_array(outs[0][i])
            assert np.array_equal(out_arr, expected_out[i])
@with_setup(check_env_compatibility)
def test_numba_func():
    # CPU test cases for numba_function; each tuple is:
    # shape, dtype, run_fn, out_types,
    # in_types, out_ndim, in_ndim, setup_fn, batch_processing,
    # expected_out
    args = [
        ([(10, 10, 10)], np.uint8, set_all_values_to_255_batch, [dali_types.UINT8],
         [dali_types.UINT8], [3], [3], None, True,
         [np.full((10, 10, 10), 255, dtype=np.uint8)]),
        ([(10, 10, 10)], np.uint8, set_all_values_to_255_sample, [dali_types.UINT8],
         [dali_types.UINT8], [3], [3], None, None,
         [np.full((10, 10, 10), 255, dtype=np.uint8)]),
        ([(10, 10, 10)], np.float32, set_all_values_to_float_batch, [dali_types.FLOAT],
         [dali_types.FLOAT], [3], [3], None, True,
         [np.full((10, 10, 10), 0.5, dtype=np.float32)]),
        ([(10, 10, 10)], np.float32, set_all_values_to_float_sample, [dali_types.FLOAT],
         [dali_types.FLOAT], [3], [3], None, None,
         [np.full((10, 10, 10), 0.5, dtype=np.float32)]),
        # NOTE(review): expected_out dtype is int32 while the pipeline dtype is
        # int64 — np.array_equal compares values only, so this still passes.
        ([(10, 20, 30), (20, 10, 30)], np.int64, change_out_shape_batch, [dali_types.INT64],
         [dali_types.INT64], [3], [3], setup_change_out_shape, True,
         [np.full((20, 30, 10), 42, dtype=np.int32),
          np.full((10, 30, 20), 42, dtype=np.int32)]),
        ([(10, 20, 30), (20, 10, 30)], np.int64, change_out_shape_sample, [dali_types.INT64],
         [dali_types.INT64], [3], [3], setup_change_out_shape, None,
         [np.full((20, 30, 10), 42, dtype=np.int32),
          np.full((10, 30, 20), 42, dtype=np.int32)]),
    ]
    device = "cpu"
    for shape, dtype, run_fn, out_types, in_types, outs_ndim, ins_ndim, \
            setup_fn, batch_processing, expected_out in args:
        yield _testimpl_numba_func, \
            device, shape, dtype, run_fn, out_types, in_types, outs_ndim, ins_ndim, \
            setup_fn, batch_processing, expected_out
def test_numba_func_gpu():
    # GPU test cases for numba_function; each tuple is:
    # shape, dtype, run_fn, out_types,
    # in_types, out_ndim, in_ndim, setup_fn, batch_processing,
    # expected_out
    args = [
        ([(10, 10, 10)], np.uint8, set_all_values_to_255_sample_gpu, [dali_types.UINT8],
         [dali_types.UINT8], [3], [3], None, None,
         [np.full((10, 10, 10), 255, dtype=np.uint8)]),
        ([(10, 10, 10)], np.float32, set_all_values_to_float_sample_gpu, [dali_types.FLOAT],
         [dali_types.FLOAT], [3], [3], None, None,
         [np.full((10, 10, 10), 0.5, dtype=np.float32)]),
        ([(100, 20, 30), (20, 100, 30)], np.int64, change_out_shape_sample_gpu, [dali_types.INT64],
         [dali_types.INT64], [3], [3], setup_change_out_shape, None,
         [np.full((20, 30, 100), 42, dtype=np.int32),
          np.full((100, 30, 20), 42, dtype=np.int32)]),
        # NOTE(review): (20) and (30) are plain ints, not 1-element tuples;
        # this works because the samples are 1D.
        ([(20), (30)], np.int32, change_ndim_gpu, [dali_types.INT32], [dali_types.INT32], [4], [1],
         change_ndim_setup, None,
         [change_dim_expected_out(20), change_dim_expected_out(30)]),
    ]
    device = "gpu"
    # CUDA launch configuration shared by all cases.
    blocks = [32, 32, 1]
    threads_per_block = [32, 16, 1]
    for shape, dtype, run_fn, out_types, in_types, outs_ndim, ins_ndim, \
            setup_fn, batch_processing, expected_out in args:
        yield _testimpl_numba_func, \
            device, shape, dtype, run_fn, out_types, in_types, outs_ndim, ins_ndim, \
            setup_fn, batch_processing, expected_out, blocks, threads_per_block
@pipeline_def
def numba_func_image_pipe(device="cpu", run_fn=None, out_types=None, in_types=None,
                          outs_ndim=None, ins_ndim=None, setup_fn=None, batch_processing=None,
                          blocks=None, threads_per_block=None):
    # Pipeline reading images from the Caffe LMDB, decoding them and passing
    # them through a caller-configured numba_function; returns both the
    # decoded input and the processed output for comparison.
    files, _ = dali.fn.readers.caffe(path=lmdb_folder, random_shuffle=True)
    dec_device = "cpu" if device == "cpu" else "mixed"
    images_in = dali.fn.decoders.image(files, device=dec_device)
    images_out = numba_function(
        images_in, run_fn=run_fn, out_types=out_types, in_types=in_types,
        outs_ndim=outs_ndim, ins_ndim=ins_ndim, setup_fn=setup_fn,
        batch_processing=batch_processing, device=device,
        blocks=blocks, threads_per_block=threads_per_block)
    return images_in, images_out
def _testimpl_numba_func_image(device, run_fn, out_types, in_types,
                               outs_ndim, ins_ndim, setup_fn, batch_processing, transform,
                               blocks=None, threads_per_block=None):
    # Run the image pipeline and verify that numba_function's output equals
    # `transform` applied to the decoded input image.
    pipe = numba_func_image_pipe(
        device=device, batch_size=8, num_threads=3, device_id=0, run_fn=run_fn, setup_fn=setup_fn,
        out_types=out_types, in_types=in_types, outs_ndim=outs_ndim, ins_ndim=ins_ndim,
        batch_processing=batch_processing, blocks=blocks, threads_per_block=threads_per_block)
    pipe.build()
    for _ in range(3):
        images_in, images_out = pipe.run()
        for i in range(len(images_in)):
            image_in_transformed = transform(to_array(images_in[i]))
            assert np.array_equal(image_in_transformed, to_array(images_out[i]))
def reverse_col_batch(out0, in0):
    """Batch run function: write the color negative (255 - x) of each sample."""
    for idx in range(len(out0)):
        negative = 255 - in0[idx][:]
        out0[idx][:] = negative
def reverse_col_sample(out0, in0):
    """Per-sample run function: write the color negative (255 - x) of the input."""
    negative = 255 - in0[:]
    out0[:] = negative
def reverse_col_sample_gpu(out0, in0):
    # CUDA per-sample run function: grid-stride loop computing the color
    # negative (255 - x) of every element.
    tx, ty, tz = cuda.grid(3)
    x_s, y_s, z_s = cuda.gridsize(3)
    for z in range(tz, out0.shape[0], z_s):
        for y in range(ty, out0.shape[1], y_s):
            for x in range(tx, out0.shape[2], x_s):
                out0[z][y][x] = 255 - in0[z][y][x]
def rot_image_batch(out0, in0):
    """Batch run function: rotate each HWC sample 90 degrees (like np.rot90)."""
    for idx in range(len(out0)):
        dst = out0[idx]
        src = in0[idx]
        rows = dst.shape[0]
        for r in range(rows):
            for c in range(dst.shape[1]):
                dst[r][c] = src[c][rows - 1 - r]
def rot_image_sample(out0, in0):
    """Per-sample run function: rotate the HWC input 90 degrees (like np.rot90)."""
    rows = out0.shape[0]
    for r in range(rows):
        for c in range(out0.shape[1]):
            out0[r][c] = in0[c][rows - 1 - r]
def rot_image_sample_gpu(out0, in0):
    # CUDA per-sample run function: grid-stride 90-degree rotation of an HWC
    # image (mirrors np.rot90 over the H/W axes, channels untouched).
    tx, ty, tz = cuda.grid(3)
    x_s, y_s, z_s = cuda.gridsize(3)
    for z in range(tz, out0.shape[0], z_s):
        for y in range(ty, out0.shape[1], y_s):
            for x in range(tx, out0.shape[2], x_s):
                out0[z][y][x] = in0[y][out0.shape[0] - z - 1][x]
def rot_image_setup(outs, ins):
    """Setup function for the rotation tests: swap H and W, keep channels."""
    out0 = outs[0]
    in0 = ins[0]
    for idx in range(len(out0)):
        height = in0[idx][0]
        width = in0[idx][1]
        channels = in0[idx][2]
        out0[idx][0] = width
        out0[idx][1] = height
        out0[idx][2] = channels
@with_setup(check_env_compatibility)
def test_numba_func_image():
    # CPU image cases; each tuple is: run_fn, out_types, in_types, outs_ndim,
    # ins_ndim, setup_fn, batch_processing, reference transform on the input.
    args = [
        (reverse_col_batch, [dali_types.UINT8], [dali_types.UINT8],
         [3], [3], None, True, lambda x: 255 - x),
        (reverse_col_sample, [dali_types.UINT8], [dali_types.UINT8],
         [3], [3], None, None, lambda x: 255 - x),
        (rot_image_batch, [dali_types.UINT8], [dali_types.UINT8],
         [3], [3], rot_image_setup, True, lambda x: np.rot90(x)),
        (rot_image_sample, [dali_types.UINT8], [dali_types.UINT8],
         [3], [3], rot_image_setup, None, lambda x: np.rot90(x)),
    ]
    device = "cpu"
    for run_fn, out_types, in_types, outs_ndim, ins_ndim, \
            setup_fn, batch_processing, transform in args:
        yield _testimpl_numba_func_image, \
            device, run_fn, out_types, in_types, outs_ndim, ins_ndim, \
            setup_fn, batch_processing, transform
def test_numba_func_image_gpu():
    # GPU image cases; same tuple layout as test_numba_func_image.
    args = [
        (reverse_col_sample_gpu, [dali_types.UINT8], [dali_types.UINT8],
         [3], [3], None, None, lambda x: 255 - x),
        (rot_image_sample_gpu, [dali_types.UINT8], [dali_types.UINT8],
         [3], [3], rot_image_setup, None, np.rot90),
    ]
    device = "gpu"
    # CUDA launch configuration shared by both cases.
    blocks = [32, 32, 1]
    threads_per_block = [32, 8, 1]
    for run_fn, out_types, in_types, outs_ndim, ins_ndim, \
            setup_fn, batch_processing, transform in args:
        yield _testimpl_numba_func_image, \
            device, run_fn, out_types, in_types, outs_ndim, ins_ndim, \
            setup_fn, batch_processing, transform, blocks, threads_per_block
def split_images_col_sample(out0, out1, out2, in0):
    """Per-sample run function: split an HWC image into three HW channel planes."""
    for row in range(in0.shape[0]):
        for col in range(in0.shape[1]):
            out0[row][col] = in0[row][col][0]
            out1[row][col] = in0[row][col][1]
            out2[row][col] = in0[row][col][2]
def split_images_col_sample_gpu(out0, out1, out2, in0):
    # CUDA per-sample run function: grid-stride over H/W, scattering the three
    # channels of the HWC input into three HW output planes.
    tx, ty = cuda.grid(2)
    x_s, y_s = cuda.gridsize(2)
    for y in range(ty, out0.shape[0], y_s):
        for x in range(tx, out0.shape[1], x_s):
            out0[y][x] = in0[y][x][0]
            out1[y][x] = in0[y][x][1]
            out2[y][x] = in0[y][x][2]
def setup_split_images_col(outs, ins):
    """Setup function: each of the three outputs gets the input's H and W."""
    out0 = outs[0]
    out1 = outs[1]
    out2 = outs[2]
    in_shapes = ins[0]
    for idx in range(len(out0)):
        height = in_shapes[idx][0]
        width = in_shapes[idx][1]
        out0[idx][0] = height
        out0[idx][1] = width
        out1[idx][0] = height
        out1[idx][1] = width
        out2[idx][0] = height
        out2[idx][1] = width
@pipeline_def
def numba_func_split_image_pipe(run_fn=None, out_types=None, in_types=None,
                                outs_ndim=None, ins_ndim=None, setup_fn=None,
                                batch_processing=None, device="cpu",
                                blocks=None, threads_per_block=None):
    # Pipeline decoding LMDB images and splitting them into three outputs with
    # a caller-configured numba_function; returns the input plus the 3 planes.
    files, _ = dali.fn.readers.caffe(path=lmdb_folder)
    dec_device = "cpu" if device == "cpu" else "mixed"
    images_in = dali.fn.decoders.image(files, device=dec_device)
    out0, out1, out2 = numba_function(
        images_in, run_fn=run_fn, out_types=out_types, in_types=in_types,
        outs_ndim=outs_ndim, ins_ndim=ins_ndim, setup_fn=setup_fn,
        batch_processing=batch_processing, device=device,
        blocks=blocks, threads_per_block=threads_per_block)
    return images_in, out0, out1, out2
@with_setup(check_env_compatibility)
def test_split_images_col():
    # CPU: stacking the three split planes back along the channel axis must
    # reproduce the original image.
    pipe = numba_func_split_image_pipe(
        batch_size=8, num_threads=1, device_id=0,
        run_fn=split_images_col_sample, setup_fn=setup_split_images_col,
        out_types=[dali_types.UINT8 for i in range(3)],
        in_types=[dali_types.UINT8],
        outs_ndim=[2, 2, 2],
        ins_ndim=[3],
        device="cpu")
    pipe.build()
    for _ in range(3):
        images_in, R, G, B = pipe.run()
        for i in range(len(images_in)):
            assert np.array_equal(
                images_in.at(i), np.stack([R.at(i), G.at(i), B.at(i)], axis=2))
def test_split_images_col_gpu():
    # GPU variant of test_split_images_col using the CUDA split kernel.
    blocks = [32, 32, 1]
    threads_per_block = [32, 8, 1]
    pipe = numba_func_split_image_pipe(
        batch_size=8, num_threads=1, device_id=0,
        run_fn=split_images_col_sample_gpu, setup_fn=setup_split_images_col,
        out_types=[dali_types.UINT8 for i in range(3)],
        in_types=[dali_types.UINT8],
        outs_ndim=[2, 2, 2],
        ins_ndim=[3],
        device="gpu",
        blocks=blocks, threads_per_block=threads_per_block)
    pipe.build()
    for _ in range(3):
        images_in, R, G, B = pipe.run()
        for i in range(len(images_in)):
            assert np.array_equal(
                to_array(images_in[i]),
                np.stack([to_array(R[i]), to_array(G[i]), to_array(B[i])], axis=2))
def multiple_ins_setup(outs, ins):
    """Setup function: output shape is input H, W plus a channel axis of extent 3."""
    out0 = outs[0]
    in0 = ins[0]
    channels = 3
    for idx in range(len(out0)):
        out0[idx][0] = in0[idx][0]
        out0[idx][1] = in0[idx][1]
        out0[idx][2] = channels
def multiple_ins_run(out0, in0, in1, in2):
    """Per-sample run function: stack three HW inputs into the channels of out0."""
    for row in range(out0.shape[0]):
        for col in range(out0.shape[1]):
            out0[row][col][0] = in0[row][col]
            out0[row][col][1] = in1[row][col]
            out0[row][col][2] = in2[row][col]
def multiple_ins_run_gpu(out0, in0, in1, in2):
    # CUDA per-sample run function: grid-stride over H/W, gathering the three
    # HW inputs into the channel axis of the single HWC output.
    tx, ty = cuda.grid(2)
    x_s, y_s = cuda.gridsize(2)
    for y in range(ty, out0.shape[0], y_s):
        for x in range(tx, out0.shape[1], x_s):
            out0[y][x][0] = in0[y][x]
            out0[y][x][1] = in1[y][x]
            out0[y][x][2] = in2[y][x]
@pipeline_def
def numba_multiple_ins_pipe(shapes, dtype, run_fn=None, out_types=None, in_types=None,
                            outs_ndim=None, ins_ndim=None, setup_fn=None, batch_processing=None,
                            device="cpu", blocks=None, threads_per_block=None):
    # Pipeline feeding three zero-filled external sources into a single
    # numba_function, to exercise the multi-input path.
    data0 = fn.external_source(
        lambda: get_data_zeros(shapes, dtype), batch=True, device=device)
    data1 = fn.external_source(
        lambda: get_data_zeros(shapes, dtype), batch=True, device=device)
    data2 = fn.external_source(
        lambda: get_data_zeros(shapes, dtype), batch=True, device=device)
    return numba_function(
        data0, data1, data2, run_fn=run_fn, out_types=out_types, in_types=in_types,
        outs_ndim=outs_ndim, ins_ndim=ins_ndim, setup_fn=setup_fn,
        batch_processing=batch_processing, device=device,
        blocks=blocks, threads_per_block=threads_per_block)
@with_setup(check_env_compatibility)
def test_multiple_ins():
    # CPU: three zero inputs stacked by multiple_ins_run must give a zero
    # (10, 10, 3) output.
    pipe = numba_multiple_ins_pipe(
        shapes=[(10, 10)], dtype=np.uint8, batch_size=8, num_threads=1, device_id=0,
        run_fn=multiple_ins_run,
        setup_fn=multiple_ins_setup,
        out_types=[dali_types.UINT8],
        in_types=[dali_types.UINT8 for i in range(3)],
        outs_ndim=[3], ins_ndim=[2, 2, 2],
        device="cpu")
    pipe.build()
    for _ in range(3):
        outs = pipe.run()
        out_arr = np.array(outs[0][0])
        assert np.array_equal(out_arr, np.zeros((10, 10, 3), dtype=np.uint8))
def test_multiple_ins_gpu():
    # GPU variant of test_multiple_ins using the CUDA gather kernel.
    blocks = [32, 32, 1]
    threads_per_block = [32, 8, 1]
    pipe = numba_multiple_ins_pipe(
        shapes=[(10, 10)], dtype=np.uint8, batch_size=8, num_threads=1, device_id=0,
        run_fn=multiple_ins_run_gpu,
        setup_fn=multiple_ins_setup,
        out_types=[dali_types.UINT8],
        in_types=[dali_types.UINT8 for i in range(3)],
        outs_ndim=[3], ins_ndim=[2, 2, 2],
        device="gpu",
        blocks=blocks, threads_per_block=threads_per_block)
    pipe.build()
    for _ in range(3):
        outs = pipe.run()
        out_arr = to_array(outs[0][0])
        assert np.array_equal(out_arr, np.zeros((10, 10, 3), dtype=np.uint8))
def nonuniform_types_setup(outs, ins):
    """Setup function: out0 keeps the input's 3D shape, out1 holds 3 shape values."""
    out_img_shape = outs[0]
    out_shape_shape = outs[1]
    in_shape = ins[0]
    for idx in range(len(out_img_shape)):
        out_img_shape[idx][0] = in_shape[idx][0]
        out_img_shape[idx][1] = in_shape[idx][1]
        out_img_shape[idx][2] = in_shape[idx][2]
        out_shape_shape[idx][0] = 3
def nonuniform_types_run_cpu(out_img, out_shape, in_img):
    """Per-sample run function: color negative plus the image shape as a 2nd output."""
    negative = 255 - in_img[:]
    out_img[:] = negative
    out_shape[:] = out_img.shape
def nonuniform_types_run_gpu(out0, out_shape, in0):
    # CUDA per-sample run function: grid-stride color negative; only the
    # (0, 0, 0) thread writes the shape output so there is a single writer.
    tx, ty, tz = cuda.grid(3)
    x_s, y_s, z_s = cuda.gridsize(3)
    if tx + ty + tz == 0:
        out_shape[:] = out0.shape
    for z in range(tz, out0.shape[0], z_s):
        for y in range(ty, out0.shape[1], y_s):
            for x in range(tx, out0.shape[2], x_s):
                out0[z][y][x] = 255 - in0[z][y][x]
@pipeline_def
def nonuniform_types_pipe(run_fn=None, out_types=None, in_types=None,
                          outs_ndim=None, ins_ndim=None, setup_fn=nonuniform_types_setup,
                          batch_processing=False, device="cpu",
                          blocks=None, threads_per_block=None):
    # Pipeline exercising a numba_function with outputs of different types and
    # ranks (negative image + its shape vector).
    files, _ = dali.fn.readers.caffe(path=lmdb_folder)
    dec_device = "cpu" if device == "cpu" else "mixed"
    images_in = dali.fn.decoders.image(files, device=dec_device)
    out_img, out_shape = numba_function(
        images_in, run_fn=run_fn, out_types=out_types, in_types=in_types,
        outs_ndim=outs_ndim, ins_ndim=ins_ndim, setup_fn=setup_fn,
        batch_processing=batch_processing, device=device,
        blocks=blocks, threads_per_block=threads_per_block)
    return images_in, out_img, out_shape
@with_setup(check_env_compatibility)
def test_nonuniform_types_cpu():
    # CPU: output 0 must be the color negative of the input, output 1 its shape.
    pipe = nonuniform_types_pipe(
        batch_size=8, num_threads=1, device_id=0,
        run_fn=nonuniform_types_run_cpu,
        out_types=[dali_types.UINT8, dali_types.INT64],
        in_types=[dali_types.UINT8],
        outs_ndim=[3, 1],
        ins_ndim=[3],
        device="cpu")
    pipe.build()
    for _ in range(3):
        images_in, images_out, img_shape = pipe.run()
        for i in range(len(images_in)):
            assert np.array_equal(255 - images_in.at(i), images_out.at(i))
            assert np.array_equal(images_out.at(i).shape, img_shape.at(i))
def test_nonuniform_types_gpu():
    # GPU variant of test_nonuniform_types_cpu using the CUDA kernel.
    blocks = [16, 16, 1]
    threads_per_block = [32, 16, 1]
    pipe = nonuniform_types_pipe(
        batch_size=8, num_threads=1, device_id=0,
        run_fn=nonuniform_types_run_gpu,
        out_types=[dali_types.UINT8, dali_types.INT64],
        in_types=[dali_types.UINT8],
        outs_ndim=[3, 1],
        ins_ndim=[3],
        device="gpu",
        blocks=blocks, threads_per_block=threads_per_block)
    pipe.build()
    for _ in range(3):
        images_in, images_out, img_shape = pipe.run()
        # Copy all three outputs to the host before comparing.
        images_in, images_out, img_shape = \
            (images_in.as_cpu(), images_out.as_cpu(), img_shape.as_cpu())
        for i in range(len(images_in)):
            assert np.array_equal(255 - images_in.at(i), images_out.at(i))
            assert np.array_equal(images_out.at(i).shape, img_shape.at(i))
|
DALI-main
|
dali/test/python/operator_1/test_numba_func.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
from nvidia.dali import pipeline_def, fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
import numpy as np
import os
from nose_utils import assert_raises
from test_utils import as_array
from test_utils import compare_pipelines
from test_utils import RandomDataIterator
from test_utils import get_dali_extra_path
from test_slice import check_slice_output, abs_slice_start_and_end
import itertools
from nose2.tools import params
# Root of the DALI_extra test-data checkout.
test_data_root = get_dali_extra_path()
# Caffe LMDB database the crop pipelines read from.
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
# Optical-flow video test sequence path.
test_data_video = os.path.join(test_data_root, 'db', 'optical_flow', 'sintel_trailer')
class CropPipeline(Pipeline):
    """Reads LMDB images and crops them, either inside the decoder (fused
    decoders.ImageCrop) or with a separate Crop operator."""

    def __init__(self, device, batch_size, num_threads=1, device_id=0, num_gpus=1,
                 crop_shape=(224, 224), crop_x=0.3, crop_y=0.2, is_fused_decoder=False):
        super(CropPipeline, self).__init__(batch_size, num_threads, device_id)
        self.is_fused_decoder = is_fused_decoder
        self.device = device
        self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
                                       num_shards=num_gpus)
        if self.is_fused_decoder:
            # Decode and crop in one operator (CPU only).
            self.decode = ops.decoders.ImageCrop(device="cpu",
                                                 crop=crop_shape,
                                                 crop_pos_x=crop_x,
                                                 crop_pos_y=crop_y,
                                                 output_type=types.RGB)
        else:
            # Decode first, crop afterwards on the requested device.
            self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
            self.crop = ops.Crop(device=self.device,
                                 crop=crop_shape,
                                 crop_pos_x=crop_x,
                                 crop_pos_y=crop_y)

    def define_graph(self):
        inputs, _ = self.input(name="Reader")
        if self.is_fused_decoder:
            images = self.decode(inputs)
            return images
        else:
            images = self.decode(inputs)
            if self.device == 'gpu':
                images = images.gpu()
            out = self.crop(images)
            return out
def check_crop_vs_fused_decoder(device, batch_size):
    """Fused decoder+crop must produce the same output as decode followed by crop."""
    fused = CropPipeline(device, batch_size, is_fused_decoder=True)
    separate = CropPipeline(device, batch_size, is_fused_decoder=False)
    compare_pipelines(fused, separate, batch_size=batch_size, N_iterations=3)
def test_crop_vs_fused_decoder():
    """Generate fused-vs-separate crop cases over devices and batch sizes."""
    for device, batch_size in itertools.product({'cpu', 'gpu'}, {1, 32}):
        yield check_crop_vs_fused_decoder, device, batch_size
def check_crop_cpu_vs_gpu(batch_size):
    """The CPU and GPU Crop backends must produce identical results."""
    cpu_pipe = CropPipeline('cpu', batch_size)
    gpu_pipe = CropPipeline('gpu', batch_size)
    compare_pipelines(cpu_pipe, gpu_pipe, batch_size=batch_size, N_iterations=3)
def test_crop_cpu_vs_gpu():
    """Generate CPU-vs-GPU crop comparison cases."""
    for batch_size in (1, 32):
        yield check_crop_cpu_vs_gpu, batch_size
class CropSequencePipeline(Pipeline):
    """Crops externally fed data (images or sequences) with a fixed window,
    on the requested device."""

    def __init__(self, device, batch_size, layout, iterator, num_threads=1, device_id=0):
        super(CropSequencePipeline, self).__init__(batch_size, num_threads, device_id)
        self.device = device
        self.layout = layout
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.crop = ops.Crop(device=self.device,
                             crop=(224, 224),
                             crop_pos_x=0.3,
                             crop_pos_y=0.2)

    def define_graph(self):
        self.data = self.inputs()
        sequence = self.data.gpu() if self.device == 'gpu' else self.data
        out = self.crop(sequence)
        return out

    def iter_setup(self):
        # Pull the next batch from the iterator and feed the external source.
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
class CropSequencePythonOpPipeline(Pipeline):
    """Reference pipeline: applies a Python crop function via PythonFunction.

    exec_async/exec_pipelined are disabled because PythonFunction requires it.
    """

    def __init__(self, function, batch_size, layout, iterator, num_threads=1, device_id=0):
        super(CropSequencePythonOpPipeline, self).__init__(batch_size,
                                                           num_threads,
                                                           device_id,
                                                           exec_async=False,
                                                           exec_pipelined=False)
        self.layout = layout
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.crop = ops.PythonFunction(function=function, output_layouts=layout)

    def define_graph(self):
        self.data = self.inputs()
        out = self.crop(self.data)
        return out

    def iter_setup(self):
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
def crop_func_help(image, layout, crop_y=0.2, crop_x=0.3, crop_h=224, crop_w=224):
    """Reference crop used to validate DALI's Crop operator.

    Args:
        image: numpy array, "HWC" (3D) or "FHWC" (4D) depending on `layout`.
        layout: "HWC" or "FHWC".
        crop_y, crop_x: normalized anchor of the crop window within the free space.
        crop_h, crop_w: crop window extents in pixels.

    Returns:
        The cropped view of `image` (frames and channels untouched).

    Raises:
        ValueError: for an unsupported layout (previously this fell through and
            failed later with a NameError on `H`).
    """
    if layout == "FHWC":
        assert len(image.shape) == 4
        H = image.shape[1]
        W = image.shape[2]
    elif layout == "HWC":
        assert len(image.shape) == 3
        H = image.shape[0]
        W = image.shape[1]
    else:
        raise ValueError("Unsupported layout: {}".format(layout))
    assert H >= crop_h
    assert W >= crop_w
    # Match DALI's arithmetic: anchor scales the free space, computed in
    # float32, +0.5 rounds to nearest.
    start_y = int(np.float32(crop_y) * np.float32(H - crop_h) + np.float32(0.5))
    end_y = start_y + crop_h
    start_x = int(np.float32(crop_x) * np.float32(W - crop_w) + np.float32(0.5))
    end_x = start_x + crop_w
    if layout == "FHWC":
        return image[:, start_y:end_y, start_x:end_x, :]
    return image[start_y:end_y, start_x:end_x, :]
def crop_NFHWC_func(image):
    """Reference crop for a video sequence laid out as FHWC."""
    return crop_func_help(image, layout="FHWC")
def crop_NHWC_func(image):
    """Reference crop for a single image laid out as HWC."""
    return crop_func_help(image, layout="HWC")
def check_crop_NFHWC_vs_python_op_crop(device, batch_size):
    """DALI Crop on FHWC sequences must match the Python reference crop."""
    shape = (10, 300, 400, 3)
    dali_data = RandomDataIterator(batch_size, shape=shape)
    ref_data = RandomDataIterator(batch_size, shape=shape)
    compare_pipelines(
        CropSequencePipeline(device, batch_size, "FHWC", iter(dali_data)),
        CropSequencePythonOpPipeline(crop_NFHWC_func, batch_size, "FHWC", iter(ref_data)),
        batch_size=batch_size, N_iterations=3)
def test_crop_NFHWC_vs_python_op_crop():
    """Generate FHWC crop-vs-reference cases."""
    for device, batch_size in itertools.product({'cpu', 'gpu'}, {1, 4}):
        yield check_crop_NFHWC_vs_python_op_crop, device, batch_size
def check_crop_NHWC_vs_python_op_crop(device, batch_size):
    """DALI Crop on HWC images must match the Python reference crop."""
    shape = (300, 400, 3)
    dali_data = RandomDataIterator(batch_size, shape=shape)
    ref_data = RandomDataIterator(batch_size, shape=shape)
    compare_pipelines(
        CropSequencePipeline(device, batch_size, "HWC", iter(dali_data)),
        CropSequencePythonOpPipeline(crop_NHWC_func, batch_size, "HWC", iter(ref_data)),
        batch_size=batch_size, N_iterations=3)
def test_crop_NHWC_vs_python_op_crop():
    """Generate HWC crop-vs-reference cases."""
    for device, batch_size in itertools.product({'cpu', 'gpu'}, {1, 4}):
        yield check_crop_NHWC_vs_python_op_crop, device, batch_size
class CropCastPipeline(Pipeline):
    """Crops decoded images either directly, or with an intermediate cast to
    float and back to uint8 (two chained Crop ops with dtype arguments)."""

    def __init__(self, device, batch_size, num_threads=1, device_id=0, num_gpus=1,
                 should_perform_cast=False):
        super(CropCastPipeline, self).__init__(batch_size, num_threads, device_id)
        self.should_perform_cast = should_perform_cast
        self.device = device
        self.input = ops.readers.Caffe(path=caffe_db_folder, shard_id=device_id,
                                       num_shards=num_gpus)
        self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
        if self.should_perform_cast:
            # First crop outputs float, second crops nothing new (anchor 0)
            # and casts back to uint8.
            self.crop = ops.Crop(device=self.device, crop=(224, 224), crop_pos_x=0.3,
                                 crop_pos_y=0.2, dtype=types.FLOAT)
            self.crop2 = ops.Crop(device=self.device, crop=(224, 224), crop_pos_x=0.0,
                                  crop_pos_y=0.0, dtype=types.UINT8)
        else:
            self.crop = ops.Crop(device=self.device, crop=(224, 224), crop_pos_x=0.3,
                                 crop_pos_y=0.2)

    def define_graph(self):
        inputs, labels = self.input(name="Reader")
        images = self.decode(inputs)
        if self.device == 'gpu':
            images = images.gpu()
        if self.should_perform_cast:
            images_float = self.crop(images)
            images = self.crop2(images_float)
        else:
            images = self.crop(images)
        return images
def check_crop_no_cast_vs_cast_to_float_and_back(device, batch_size):
    """Cropping directly must equal crop->float followed by crop->uint8."""
    no_cast = CropCastPipeline(device, batch_size, should_perform_cast=False)
    with_cast = CropCastPipeline(device, batch_size, should_perform_cast=True)
    compare_pipelines(no_cast, with_cast, batch_size=batch_size, N_iterations=3)
def test_crop_no_cast_vs_cast_to_float_and_back():
    """Generate cast-round-trip crop comparison cases."""
    for device, batch_size in itertools.product({'cpu', 'gpu'}, {1, 4}):
        yield check_crop_no_cast_vs_cast_to_float_and_back, device, batch_size
class Crop3dPipeline(Pipeline):
    """Crops externally fed 3D volumes (or sequences treated as volumes when
    crop_seq_as_depth=True) with fixed normalized anchors and relative extents."""

    def __init__(self, device, batch_size, iterator, data_shape, data_layout, num_threads=1,
                 device_id=0, crop_seq_as_depth=False):
        super(Crop3dPipeline, self).__init__(batch_size, num_threads, device_id)
        self.device = device
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.data_shape = data_shape
        self.data_layout = data_layout
        # Extract D, H, W from the shape depending on the layout; sequence
        # layouts are only accepted when the frame axis is cropped as depth.
        if self.data_layout == "DHWC":
            D, H, W, _ = self.data_shape
        elif self.data_layout == "CDHW":
            _, D, H, W = self.data_shape
        elif self.data_layout == "FHWC" and crop_seq_as_depth:
            D, H, W, _ = self.data_shape
        elif self.data_layout == "FCHW" and crop_seq_as_depth:
            D, _, H, W = self.data_shape
        else:
            assert False
        self.crop = ops.Crop(device=self.device,
                             crop_pos_z=0.1,
                             crop_pos_y=0.2,
                             crop_pos_x=0.3,
                             crop_d=D * 0.91,
                             crop_h=H * 0.85,
                             crop_w=W * 0.75)

    def define_graph(self):
        self.data = self.inputs()
        sequence = self.data.gpu() if self.device == 'gpu' else self.data
        out = self.crop(sequence)
        return out

    def iter_setup(self):
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.data_layout)
class Crop3dPythonOpPipeline(Pipeline):
    """Reference 3D crop pipeline: wraps a Python crop function in
    PythonFunction (async/pipelined execution disabled as required)."""

    def __init__(self, function, batch_size, iterator, data_shape, data_layout, num_threads=1,
                 device_id=0):
        super(Crop3dPythonOpPipeline, self).__init__(batch_size, num_threads, device_id,
                                                     exec_async=False, exec_pipelined=False)
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.data_shape = data_shape
        self.data_layout = data_layout

        # Bind layout/shape so PythonFunction sees a single-argument callable.
        def crop_func(image):
            return function(image, layout=self.data_layout, shape=self.data_shape)

        self.crop = ops.PythonFunction(function=crop_func, output_layouts=data_layout)

    def define_graph(self):
        self.data = self.inputs()
        out = self.crop(self.data)
        return out

    def iter_setup(self):
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.data_layout)
def crop_3d_func(image, layout, shape, crop_anchor=(0.1, 0.2, 0.3), crop_shape=(0.91, 0.85, 0.75)):
    """Reference 3D crop: relative anchors/extents over the D, H, W axes of a
    4D "DHWC" or "CDHW" array; channels are left untouched."""
    assert len(image.shape) == 4
    assert len(crop_anchor) == 3
    assert len(crop_shape) == 3
    if layout == "DHWC":
        D, H, W = image.shape[0], image.shape[1], image.shape[2]
    elif layout == "CDHW":
        D, H, W = image.shape[1], image.shape[2], image.shape[3]
    else:
        assert False
    crop_d = int(crop_shape[0] * D)
    crop_h = int(crop_shape[1] * H)
    crop_w = int(crop_shape[2] * W)
    assert D >= crop_d
    assert H >= crop_h
    assert W >= crop_w
    # Anchor scales the free space; computed in float32 with +0.5 rounding,
    # matching DALI's arithmetic.
    starts = []
    for anchor, full_extent, cropped in zip(crop_anchor, (D, H, W), (crop_d, crop_h, crop_w)):
        starts.append(int(np.float32(0.5) + np.float32(anchor) * np.float32(full_extent - cropped)))
    z0, y0, x0 = starts
    if layout == "DHWC":
        return image[z0:z0 + crop_d, y0:y0 + crop_h, x0:x0 + crop_w, :]
    return image[:, z0:z0 + crop_d, y0:y0 + crop_h, x0:x0 + crop_w]
def check_crop_3d_vs_python_op_crop(device, batch_size, layout, shape):
    """DALI's 3D Crop must match the Python reference crop_3d_func."""
    dali_data = RandomDataIterator(batch_size, shape=shape)
    ref_data = RandomDataIterator(batch_size, shape=shape)
    dali_pipe = Crop3dPipeline(device, batch_size, iter(dali_data),
                               data_shape=shape, data_layout=layout)
    ref_pipe = Crop3dPythonOpPipeline(crop_3d_func, batch_size, iter(ref_data),
                                      data_shape=shape, data_layout=layout)
    compare_pipelines(dali_pipe, ref_pipe, batch_size=batch_size, N_iterations=3)
def test_crop_3d_vs_python_op_crop():
    # Exercise both 3D layouts with a variety of volume shapes and channel counts.
    for device in {'cpu', 'gpu'}:
        for batch_size in {1, 4}:
            for layout, shape in {("DHWC", (300, 100, 10, 3)),
                                  ("DHWC", (100, 300, 10, 1)),
                                  ("DHWC", (10, 30, 300, 1)),
                                  ("DHWC", (20, 50, 60, 8)),
                                  ("CDHW", (3, 300, 100, 10)),
                                  ("CDHW", (3, 300, 10, 100)),
                                  ("CDHW", (8, 30, 10, 50))}:
                yield check_crop_3d_vs_python_op_crop, device, batch_size, layout, shape
def check_crop_sequence_length(device, batch_size, dtype, input_layout, input_shape):
    """Crop a sequence treating the frame axis as depth; verify output shapes."""
    if input_layout == "FHWC":
        D, H, W, C = input_shape
    elif input_layout == "FCHW":
        D, C, H, W = input_shape
    else:
        assert False
    crop_d = int(D * 0.91)
    crop_h = int(H * 0.85)
    crop_w = int(W * 0.75)
    if input_layout == "FHWC":
        expected_shape = (crop_d, crop_h, crop_w, C)
    else:
        expected_shape = (crop_d, C, crop_h, crop_w)
    data_iter = RandomDataIterator(batch_size, shape=input_shape)
    pipe = Crop3dPipeline(device, batch_size, iter(data_iter),
                          data_shape=input_shape, data_layout=input_layout,
                          crop_seq_as_depth=True)
    pipe.build()
    out_data = pipe.run()[0]
    for idx in range(batch_size):
        sample_shape = out_data.at(idx).shape
        assert sample_shape == expected_shape, \
            "Shape mismatch {} != {}".format(sample_shape, expected_shape)
# Tests cropping along the sequence dimension as if it was depth
def test_cmn_crop_sequence_length():
    """Generate sequence-as-depth crop cases for FHWC and FCHW layouts."""
    input_configs = {("FHWC", (10, 60, 80, 3)),
                     ("FCHW", (10, 3, 60, 80))}
    for device, batch_size, dtype in itertools.product(['cpu'], [8], [types.FLOAT]):
        for input_layout, input_shape in input_configs:
            assert len(input_layout) == len(input_shape)
            yield check_crop_sequence_length, device, batch_size, dtype, \
                input_layout, input_shape
class CropSynthPipe(Pipeline):
    """Crops externally fed synthetic data; optionally also returns the
    uncropped input and supports out-of-bounds policies with fill values."""

    def __init__(self, device, batch_size, data_iterator, num_threads=1, device_id=0, num_gpus=1,
                 crop_shape=(224, 224), crop_x=0.3, crop_y=0.2, extra_outputs=False,
                 out_of_bounds_policy=None, fill_values=None, layout="HWC"):
        super(CropSynthPipe, self).__init__(batch_size, num_threads, device_id)
        self.device = device
        self.extra_outputs = extra_outputs
        self.inputs = ops.ExternalSource()
        self.data_iterator = data_iterator
        self.layout = layout
        self.crop = ops.Crop(device=self.device, crop=crop_shape, crop_pos_x=crop_x,
                             crop_pos_y=crop_y, out_of_bounds_policy=out_of_bounds_policy,
                             fill_values=fill_values)

    def define_graph(self):
        self.data = self.inputs()
        images = self.data.gpu() if self.device == 'gpu' else self.data
        out = self.crop(images)
        if self.extra_outputs:
            # Return the input too so tests can compare against it directly.
            return out, images
        else:
            return out

    def iter_setup(self):
        data = self.data_iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
def check_crop_with_out_of_bounds_policy_support(device, batch_size, input_shape=(100, 200, 3),
                                                 out_of_bounds_policy=None,
                                                 fill_values=(0x76, 0xb9, 0x00)):
    """Run Crop with a window larger than the input and verify that the
    requested out_of_bounds_policy ('pad' or 'trim_to_shape') is honored.

    This test case is written with HWC layout in mind and "HW" axes in slice arguments.
    """
    layout = "HWC"
    assert len(input_shape) == 3
    if fill_values is not None and len(fill_values) > 1:
        assert input_shape[2] == len(fill_values)
    eii = RandomDataIterator(batch_size, shape=input_shape)
    # Window twice the input extent -> always partially out of bounds
    crop_shape = tuple(extent * 2 for extent in input_shape[:2])
    crop_y = 0.5
    crop_x = 0.5
    # Fix: crop_x/crop_y were previously swapped when passed to the pipeline
    # (harmless only because both happened to be 0.5)
    pipe = CropSynthPipe(device, batch_size, iter(eii),
                         layout=layout,
                         crop_shape=crop_shape,
                         crop_x=crop_x, crop_y=crop_y,
                         out_of_bounds_policy=out_of_bounds_policy,
                         fill_values=fill_values,
                         extra_outputs=True)
    if fill_values is None:
        fill_values = 0
    pipe.build()
    for _ in range(3):
        outs = pipe.run()
        out = outs[0]
        in_data = outs[1]
        if isinstance(out, dali.backend_impl.TensorListGPU):
            out = out.as_cpu()
        if isinstance(in_data, dali.backend_impl.TensorListGPU):
            in_data = in_data.as_cpu()
        assert batch_size == len(out)
        for idx in range(batch_size):
            sample_in = in_data.at(idx)
            sample_out = out.at(idx)
            in_shape = list(sample_in.shape)
            # Normalized anchor for the "HW" axes (H first, matching axis order)
            crop_anchor_norm = [crop_y, crop_x]
            crop_anchor_abs = [
                crop_anchor_norm[k] * (input_shape[k] - crop_shape[k]) for k in range(2)
            ]
            abs_start, abs_end, abs_slice_shape = abs_slice_start_and_end(
                in_shape[:2], crop_anchor_abs, crop_shape, False, False)
            check_slice_output(sample_in, sample_out, crop_anchor_abs, abs_slice_shape, abs_start,
                               abs_end, out_of_bounds_policy, fill_values)
def test_crop_with_out_of_bounds_policy_support():
    """Exercise both supported out-of-bounds policies on CPU and GPU."""
    input_shape = (40, 80, 3)
    fill_value_options = [None, (0x76, 0xb0, 0x00)]
    for policy in ['pad', 'trim_to_shape']:
        for dev in ['gpu', 'cpu']:
            for bs in [1, 3]:
                for fill in fill_value_options:
                    yield (check_crop_with_out_of_bounds_policy_support,
                           dev, bs, input_shape, policy, fill)
def check_crop_with_out_of_bounds_error(device, batch_size, input_shape=(100, 200, 3)):
    """With out_of_bounds_policy='error', a window larger than the input must
    make the pipeline raise.

    This test case is written with HWC layout in mind and "HW" axes in slice arguments.
    """
    assert len(input_shape) == 3
    data_iter = RandomDataIterator(batch_size, shape=input_shape)
    # A window twice the input size is always out of bounds
    oversized_crop = tuple(2 * extent for extent in input_shape[:2])
    pipe = CropSynthPipe(device, batch_size, iter(data_iter),
                         layout="HWC",
                         crop_shape=oversized_crop,
                         crop_x=0.5, crop_y=0.5,
                         out_of_bounds_policy="error")
    pipe.build()
    with assert_raises(RuntimeError,
                       glob="Slice can't be placed out of bounds with current policy."):
        _ = pipe.run()
def test_crop_with_out_of_bounds_error():
    """Crop with out_of_bounds_policy='error' raises for oversized windows.

    Renamed from test_slice_with_out_of_bounds_error: this generator lives in
    the *crop* test file and yields check_crop_with_out_of_bounds_error, so
    the old name was a copy-paste leftover from the slice tests.
    """
    in_shape = (40, 80, 3)
    for device in ['gpu', 'cpu']:
        for batch_size in [1, 3]:
            yield check_crop_with_out_of_bounds_error, \
                device, batch_size, in_shape
def check_crop_wrong_layout(device, batch_size, input_shape=(100, 200, 3), layout="ABC"):
    """fn.crop must reject inputs whose layout it does not recognize."""
    assert len(layout) == len(input_shape)

    @pipeline_def
    def get_pipe():
        def get_data():
            return [np.zeros(input_shape, dtype=np.uint8) for _ in range(batch_size)]
        data = fn.external_source(source=get_data, layout=layout, device=device)
        return fn.crop(data, crop_h=10, crop_w=10)

    pipe = get_pipe(batch_size=batch_size, device_id=0, num_threads=3)
    pipe.build()
    with assert_raises(RuntimeError,
                       glob=f"The layout \"{layout}\" does not match any of the allowed layouts"):
        pipe.run()
def test_crop_wrong_layout():
    """Unsupported layouts are rejected on both backends."""
    shape = (40, 80, 3)
    batch = 3
    for dev in ['gpu', 'cpu']:
        for bad_layout in ['ABC']:
            yield check_crop_wrong_layout, dev, batch, shape, bad_layout
def check_crop_empty_layout(device, batch_size, input_shape=(100, 200, 3)):
    """Crop of an input with no layout: the first two dimensions are cropped
    as height/width (output shape asserted to be (crop_h, crop_w, C))."""
    @pipeline_def
    def get_pipe():
        def get_data():
            return [np.zeros(input_shape, dtype=np.uint8) for _ in range(batch_size)]
        data = fn.external_source(source=get_data, device=device)
        return fn.crop(data, crop_h=10, crop_w=20)

    pipe = get_pipe(batch_size=batch_size, device_id=0, num_threads=3)
    pipe.build()
    out, = pipe.run()
    for idx in range(batch_size):
        assert as_array(out[idx]).shape == (10, 20, 3)
def test_crop_empty_layout():
    """Crop works on inputs carrying no layout information."""
    shape = (40, 80, 3)
    for dev in ['gpu', 'cpu']:
        yield check_crop_empty_layout, dev, 3, shape
@params(*itertools.product(('cpu', 'gpu'), ('HWC', 'FHWC', 'CHW')))
def test_crop_arg_input(device, layout):
    """Crop window given as a per-sample tensor argument (`crop=` argument
    input): output spatial extents must equal the requested extents."""
    @pipeline_def
    def pipe():
        assert 'C' in layout
        spatial_ndim = len(layout) - 1
        # 100 for every non-channel dim, 3 channels
        shape = [100 if layout[i] != 'C' else 3 for i in range(len(layout))]
        data = fn.random.uniform(range=[0, 255], shape=shape, device='cpu')
        if device == 'gpu':
            data = data.gpu()
        data = fn.reshape(data, layout=layout)
        # Random per-sample crop window, one extent per non-channel dimension
        crop_arg = fn.random.uniform(range=[10, 90], shape=(spatial_ndim,))
        out = fn.crop(data, crop=crop_arg)
        return out, crop_arg
    p = pipe(batch_size=3, num_threads=1, device_id=0)
    p.build()
    out, shape = p.run()
    ndim = len(layout)
    channel_dim = layout.find('C')
    spatial_dims = [k for k in range(ndim) if k != channel_dim]
    for i in range(len(out)):
        # Requested float extents cast to int32 for comparison with the
        # integer output extents
        expected = list(np.array(shape[i], dtype=np.int32))
        actual = [np.array(out[i].shape())[k] for k in spatial_dims]
        assert expected == actual, f"{expected} != {actual}"
@params(*itertools.product(('cpu', 'gpu'), ('round', 'truncate')))
def test_crop_rounding(device, rounding):
    # Checking window placement when the cropping window extent is an odd number
    input_shape = (20, 8, 3)
    # Odd extents; W extent (9) exceeds the input width (8), so 'pad' kicks in
    crop = (11, 9)
    @pipeline_def
    def pipe():
        data = fn.random.uniform(range=[0, 255], shape=input_shape, device='cpu')
        if device == 'gpu':
            data = data.gpu()
        data = fn.reshape(data, layout='HWC')
        cropped = fn.crop(data, crop=crop, out_of_bounds_policy='pad', rounding=rounding)
        return data, cropped
    p = pipe(batch_size=1, num_threads=1, device_id=0)
    p.build()
    input_data, cropped_data = p.run()
    data = as_array(input_data[0])
    cropped = as_array(cropped_data[0])
    if rounding == 'truncate':
        # padding happens to the right of the input
        np.testing.assert_array_equal(data[4:15, :, :], cropped[:, 0:8, :])
    elif rounding == 'round':
        # padding happens to the left of the input
        np.testing.assert_array_equal(data[5:16, :, :], cropped[:, 1:9, :])
|
DALI-main
|
dali/test/python/operator_1/test_crop.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import numpy as np
from functools import partial
from test_utils import compare_pipelines
from test_utils import RandomDataIterator
class PowerSpectrumPipeline(Pipeline):
    """Pipeline computing ops.PowerSpectrum over externally-fed data."""
    def __init__(self, device, batch_size, iterator, axis, nfft, num_threads=1, device_id=0):
        super(PowerSpectrumPipeline, self).__init__(batch_size, num_threads, device_id)
        self.device = device
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        # FFT length and transform axis are fixed per pipeline instance
        self.fft = ops.PowerSpectrum(device=self.device, axis=axis, nfft=nfft)
    def define_graph(self):
        self.data = self.inputs()
        # Move data to the GPU first when testing the 'gpu' backend
        out = self.data.gpu() if self.device == 'gpu' else self.data
        out = self.fft(out)
        return out
    def iter_setup(self):
        # Feed the next batch from the external iterator before each run
        data = self.iterator.next()
        self.feed_input(self.data, data)
def power_spectrum_numpy(nfft, axis, waveform):
    """Numpy reference for DALI's PowerSpectrum operator.

    Computes |FFT|^2 of `waveform` along `axis` with FFT length `nfft` and
    keeps only the one-sided spectrum (nfft // 2 + 1 bins).

    Args:
        nfft: FFT length (the transformed axis is padded/truncated to it).
        axis: axis along which to transform.
        waveform: numpy array of any rank.

    Returns:
        Array shaped like `waveform` except the transformed axis has
        nfft // 2 + 1 elements.
    """
    fft_out = np.fft.fft(waveform, axis=axis, n=nfft)
    power_spectrum = fft_out.real ** 2 + fft_out.imag ** 2
    out_shape = list(waveform.shape)
    out_shape[axis] = nfft // 2 + 1
    # Crop every axis to out_shape; only `axis` actually shrinks. Using a
    # tuple of slices works for any rank (the original handled only 1-3 dims
    # and hit an UnboundLocalError beyond that).
    return power_spectrum[tuple(slice(0, extent) for extent in out_shape)]
class PowerSpectrumNumpyPipeline(Pipeline):
    """Reference pipeline: power spectrum computed in numpy via PythonFunction.

    Always runs on CPU; exec_async/exec_pipelined are disabled for the
    PythonFunction operator.
    """
    def __init__(self, device, batch_size, iterator, axis, nfft,
                 num_threads=1, device_id=0):
        super(PowerSpectrumNumpyPipeline, self).__init__(
            batch_size, num_threads, device_id,
            seed=12345, exec_async=False, exec_pipelined=False)
        self.device = "cpu"
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        # Bind nfft and axis; PythonFunction then receives only the sample
        function = partial(power_spectrum_numpy, nfft, axis)
        self.power_spectrum = ops.PythonFunction(function=function)
    def define_graph(self):
        self.data = self.inputs()
        out = self.power_spectrum(self.data)
        return out
    def iter_setup(self):
        # Feed the next batch from the external iterator before each run
        data = self.iterator.next()
        self.feed_input(self.data, data)
def check_operator_power_spectrum(device, batch_size, input_shape, nfft, axis):
    """Compare DALI PowerSpectrum against the numpy reference implementation."""
    dali_data = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    ref_data = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    dali_pipe = PowerSpectrumPipeline(device, batch_size, iter(dali_data),
                                      axis=axis, nfft=nfft)
    ref_pipe = PowerSpectrumNumpyPipeline(device, batch_size, iter(ref_data),
                                          axis=axis, nfft=nfft)
    compare_pipelines(dali_pipe, ref_pipe,
                      batch_size=batch_size, N_iterations=3, eps=1e-04)
def test_operator_power_spectrum():
    """PowerSpectrum vs numpy for various FFT lengths, axes and shapes."""
    cases = [(16, 1, (2, 16)),
             (1024, 1, (1, 1024)),
             (1024, 0, (1024,)),
             (128, 1, (1, 100)),
             (128, 0, (100,)),
             (16, 0, (16, 2)),
             (8, 1, (2, 8, 2))]
    for device in ['cpu']:
        for batch_size in [3]:
            for nfft, axis, shape in cases:
                yield check_operator_power_spectrum, device, batch_size, shape, nfft, axis
|
DALI-main
|
dali/test/python/operator_1/test_power_spectrum.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import Pipeline, pipeline_def, ops, fn, types
import numpy as np
import os
from functools import partial
from math import floor
from test_utils import compare_pipelines, \
get_dali_extra_path, \
RandomDataIterator, \
generator_random_axes_for_3d_input, \
generator_random_data, \
as_array
from nose_utils import assert_raises
# Root of the DALI_extra test-data checkout (resolved by get_dali_extra_path)
test_data_root = get_dali_extra_path()
# LMDB (Caffe) image database consumed by the Slice pipelines below
caffe_db_folder = os.path.join(test_data_root, "db", "lmdb")
# Video test sequence from the optical-flow test data
test_data_video = os.path.join(test_data_root, "db", "optical_flow", "sintel_trailer")
def roundint(num):
    """Round half away from zero, matching C++ std::round.

    np.round rounds half to even, which would disagree with the native DALI
    implementation, so we add +/-0.5 and truncate instead.
    """
    offset = 0.5 if num >= 0 else -0.5
    return int(num + offset)
def abs_slice_start_and_end(in_shape,
                            slice_anchor, slice_shape,
                            normalized_anchor, normalized_shape):
    """Convert a slice specification into absolute coordinates.

    Args:
        in_shape: input extents, one per dimension.
        slice_anchor: slice start, absolute or normalized per flag.
        slice_shape: slice extent, absolute or normalized per flag.
        normalized_anchor: whether the anchor is a fraction of the input.
        normalized_shape: whether the shape is a fraction of the input.

    Returns:
        (start, end, out_shape) lists of absolute per-dimension values.
    """
    ndim = len(in_shape)

    def to_abs(values):
        # Normalized coordinate -> absolute pixel, rounded like std::round
        return [roundint(in_shape[d] * np.float64(values[d])) for d in range(ndim)]

    if normalized_anchor and normalized_shape:
        # Round anchor and anchor+shape independently: this matches the
        # original behavior (end is NOT start + rounded shape)
        start = to_abs(slice_anchor)
        end = to_abs([slice_anchor[d] + slice_shape[d] for d in range(ndim)])
    else:
        if normalized_anchor:
            start = to_abs(slice_anchor)
        else:
            start = [roundint(slice_anchor[d]) for d in range(ndim)]
        if normalized_shape:
            end = [start[d] + roundint(in_shape[d] * np.float64(slice_shape[d]))
                   for d in range(ndim)]
        else:
            end = [start[d] + roundint(slice_shape[d]) for d in range(ndim)]
    out_shape = [e - s for s, e in zip(start, end)]
    return start, end, out_shape
class SliceSynthDataPipeline(Pipeline):
    """Pipeline slicing synthetic data with the Slice operator.

    The input data, the slice anchor and the slice shape are all fed through
    ExternalSource (legacy iter_setup feeding). The input is cast to
    `input_type` before slicing; `output_type` (if given) is produced by the
    Slice operator itself. With extra_outputs=True the original data and the
    slice arguments are returned alongside the sliced output.
    """
    def __init__(self, device, batch_size, layout, iterator, pos_size_iter,
                 num_threads=1, device_id=0, num_gpus=1,
                 axes=None, axis_names=None,
                 normalized_anchor=True, normalized_shape=True,
                 extra_outputs=False,
                 out_of_bounds_policy=None,
                 fill_values=None,
                 input_type=types.FLOAT, output_type=None):
        super().__init__(batch_size, num_threads, device_id, seed=1234)
        self.device = device
        self.layout = layout
        self.iterator = iterator
        # Iterator yielding (crop_pos, crop_size) batches for the slice args
        self.pos_size_iter = pos_size_iter
        self.inputs = ops.ExternalSource()
        self.input_crop_pos = ops.ExternalSource()
        self.input_crop_size = ops.ExternalSource()
        self.extra_outputs = extra_outputs
        self.cast_in = ops.Cast(dtype=input_type)
        self.slice = ops.Slice(device=self.device,
                               dtype=output_type,
                               normalized_anchor=normalized_anchor,
                               normalized_shape=normalized_shape,
                               axes=axes,
                               axis_names=axis_names,
                               out_of_bounds_policy=out_of_bounds_policy,
                               fill_values=fill_values)
    def define_graph(self):
        self.data = self.inputs()
        self.crop_pos = self.input_crop_pos()
        self.crop_size = self.input_crop_size()
        data = self.cast_in(self.data)
        # Slice arguments stay on CPU; only the data moves to the GPU
        data = data.gpu() if self.device == "gpu" else data
        out = self.slice(data, self.crop_pos, self.crop_size)
        if self.extra_outputs:
            return out, self.data, self.crop_pos, self.crop_size
        else:
            return out
    def iter_setup(self):
        # Feed the data batch and the matching slice args before each run
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
        (crop_pos, crop_size) = self.pos_size_iter.next()
        self.feed_input(self.crop_pos, crop_pos)
        self.feed_input(self.crop_size, crop_size)
class SlicePipeline(Pipeline):
    """Pipeline reading images from an LMDB database and slicing them.

    Either decodes then slices (separate ops) or uses the fused
    decoders.ImageSlice, depending on `is_fused_decoder`. The slice anchor
    and shape are fed through ExternalSource from `pos_size_iter`.
    """
    def __init__(self, device, batch_size, pos_size_iter,
                 num_threads=1, device_id=0,
                 is_fused_decoder=False,
                 axes=None, axis_names=None,
                 normalized_anchor=True, normalized_shape=True):
        super().__init__(batch_size, num_threads, device_id, seed=1234)
        self.is_fused_decoder = is_fused_decoder
        self.pos_size_iter = pos_size_iter
        self.device = device
        # Fixed order so that both compared pipelines see the same images
        self.input = ops.readers.Caffe(path=caffe_db_folder, random_shuffle=False)
        self.input_crop_pos = ops.ExternalSource()
        self.input_crop_size = ops.ExternalSource()
        if self.is_fused_decoder:
            # Decoding and slicing happen in a single fused operator
            self.decode = ops.decoders.ImageSlice(device="cpu",
                                                  output_type=types.RGB,
                                                  normalized_anchor=normalized_anchor,
                                                  normalized_shape=normalized_shape,
                                                  axis_names=axis_names,
                                                  axes=axes)
        else:
            self.decode = ops.decoders.Image(device="cpu",
                                             output_type=types.RGB)
            self.slice = ops.Slice(device=self.device,
                                   normalized_anchor=normalized_anchor,
                                   normalized_shape=normalized_shape,
                                   axis_names=axis_names,
                                   axes=axes)
    def define_graph(self):
        inputs, labels = self.input(name="Reader")
        self.crop_pos = self.input_crop_pos()
        self.crop_size = self.input_crop_size()
        if self.is_fused_decoder:
            images = self.decode(inputs, self.crop_pos, self.crop_size)
        else:
            images = self.decode(inputs)
            if self.device == "gpu":
                images = images.gpu()
            images = self.slice(images, self.crop_pos, self.crop_size)
        return images
    def iter_setup(self):
        # Feed the next batch of slice arguments before each run
        (crop_pos, crop_size) = self.pos_size_iter.next()
        self.feed_input(self.crop_pos, crop_pos)
        self.feed_input(self.crop_size, crop_size)
class SliceArgsIterator(object):
    """Iterator producing (anchor, shape) slice-argument batches.

    Arguments are drawn uniformly from [min_norm_*, max_norm_*] in normalized
    coordinates and, when the corresponding `normalized_*` flag is False,
    converted to absolute coordinates using `image_shape`. The sliced axes are
    given either as indices (`axes`) or as layout names (`axis_names`,
    resolved against `image_layout`); the default is "WH".
    """
    def __init__(self,
                 batch_size,
                 num_dims=3,
                 image_shape=None,  # Needed if normalized_anchor and normalized_shape are False
                 image_layout=None,  # Needed if axis_names is used to specify the slice
                 normalized_anchor=True,
                 normalized_shape=True,
                 axes=None,
                 axis_names=None,
                 min_norm_anchor=0.0,
                 max_norm_anchor=0.2,
                 min_norm_shape=0.4,
                 max_norm_shape=0.75,
                 seed=54643613):
        self.batch_size = batch_size
        self.num_dims = num_dims
        self.image_shape = image_shape
        self.image_layout = image_layout
        self.normalized_anchor = normalized_anchor
        self.normalized_shape = normalized_shape
        self.axes = axes
        self.axis_names = axis_names
        self.min_norm_anchor = min_norm_anchor
        self.max_norm_anchor = max_norm_anchor
        self.min_norm_shape = min_norm_shape
        self.max_norm_shape = max_norm_shape
        self.seed = seed
        # Default slice axes when neither axes nor axis_names is given
        if not self.axis_names and not self.axes:
            self.axis_names = "WH"
        if self.axis_names:
            # Resolve layout letters to dimension indices
            self.axes = []
            for axis_name in self.axis_names:
                assert axis_name in self.image_layout
                self.axes.append(self.image_layout.index(axis_name))
        assert len(self.axes) > 0
    def __iter__(self):
        self.i = 0
        self.n = self.batch_size
        return self
    def __next__(self):
        pos = []
        size = []
        anchor_amplitude = self.max_norm_anchor - self.min_norm_anchor
        anchor_offset = self.min_norm_anchor
        shape_amplitude = self.max_norm_shape - self.min_norm_shape
        shape_offset = self.min_norm_shape
        # NOTE(review): reseeding on every __next__ makes every batch of slice
        # args identical across iterations — presumably intentional so that
        # two iterator instances with the same seed feed the two compared
        # pipelines identical arguments; confirm before changing.
        np.random.seed(self.seed)
        for k in range(self.batch_size):
            norm_anchor = anchor_amplitude * np.random.rand(len(self.axes)) + anchor_offset
            norm_shape = shape_amplitude * np.random.rand(len(self.axes)) + shape_offset
            if self.normalized_anchor:
                anchor = norm_anchor
            else:
                anchor = [floor(norm_anchor[i] * self.image_shape[self.axes[i]])
                          for i in range(len(self.axes))]
            if self.normalized_shape:
                shape = norm_shape
            else:
                shape = [floor(norm_shape[i] * self.image_shape[self.axes[i]])
                         for i in range(len(self.axes))]
            pos.append(np.asarray(anchor, dtype=np.float32))
            size.append(np.asarray(shape, dtype=np.float32))
        self.i = (self.i + 1) % self.n
        return (pos, size)
    next = __next__
def slice_func_helper(axes, axis_names, layout,
                      normalized_anchor, normalized_shape,
                      image,
                      slice_anchor, slice_shape):
    """Python reference implementation of the Slice operator.

    Args:
        axes: dimension indices to slice (or None).
        axis_names: layout letters naming the sliced dimensions (or None).
        layout: layout string of `image`, used to resolve axis_names.
        normalized_anchor / normalized_shape: interpretation flags for the
            slice arguments (fractions of the input vs absolute values).
        image: numpy array to slice.
        slice_anchor / slice_shape: one value per entry in axes/axis_names.

    Returns:
        The sliced view of `image`.
    """
    # TODO(janton): remove this
    if not axes and not axis_names:
        axis_names = "WH"
    if axis_names:
        axes = []
        for axis_name in axis_names:
            assert axis_name in layout
            axis_pos = layout.find(axis_name)
            axes.append(axis_pos)
    shape = image.shape
    # Expand the per-axis slice args to a full-rank spec; untouched dims keep
    # anchor 0 and their full extent
    full_slice_anchor = [0] * len(shape)
    full_slice_shape = list(shape)
    # enumerate instead of axes.index(axis): index() returns the first match,
    # which would be wrong if an axis ever appeared twice
    for idx, axis in enumerate(axes):
        full_slice_anchor[axis] = slice_anchor[idx]
        full_slice_shape[axis] = slice_shape[idx]
    start, end, _ = abs_slice_start_and_end(shape,
                                            full_slice_anchor, full_slice_shape,
                                            normalized_anchor, normalized_shape)
    # Tuple-of-slices indexing works for any rank (the original supported
    # only 1-4 dimensions and asserted otherwise)
    return image[tuple(slice(start[d], end[d]) for d in range(len(shape)))]
class SliceSynthDataPipelinePythonOp(Pipeline):
    """Reference pipeline: slicing done in numpy via PythonFunction.

    Mirrors SliceSynthDataPipeline (cast -> slice -> optional cast) but runs
    the slice as slice_func_helper on CPU, with exec_async/exec_pipelined
    disabled for the PythonFunction operator.
    """
    def __init__(self, batch_size, layout, iterator, pos_size_iter,
                 num_threads=1, device_id=0, num_gpus=1,
                 axes=None, axis_names=None,
                 normalized_anchor=True, normalized_shape=True,
                 input_type=types.FLOAT, output_type=None):
        super().__init__(batch_size, num_threads, device_id, seed=12345,
                         exec_async=False, exec_pipelined=False)
        self.device = "cpu"
        self.layout = layout
        self.iterator = iterator
        self.pos_size_iter = pos_size_iter
        self.inputs = ops.ExternalSource()
        self.input_crop_pos = ops.ExternalSource()
        self.input_crop_size = ops.ExternalSource()
        self.cast_in = ops.Cast(dtype=input_type)
        # Bind everything except (image, anchor, shape), which come per sample
        function = partial(
            slice_func_helper, axes, axis_names, self.layout,
            normalized_anchor, normalized_shape)
        self.slice = ops.PythonFunction(function=function, output_layouts=layout)
        self.output_type = output_type
        # Unlike ops.Slice, PythonFunction cannot convert dtype, so an
        # explicit output cast is added when requested
        if self.output_type is not None:
            self.cast_out = ops.Cast(dtype=output_type)
    def define_graph(self):
        self.data = self.inputs()
        self.crop_pos = self.input_crop_pos()
        self.crop_size = self.input_crop_size()
        out = self.cast_in(self.data)
        out = self.slice(out, self.crop_pos, self.crop_size)
        if self.output_type is not None:
            out = self.cast_out(out)
        return out
    def iter_setup(self):
        # Feed the data batch and the matching slice args before each run
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
        (crop_pos, crop_size) = self.pos_size_iter.next()
        self.feed_input(self.crop_pos, crop_pos)
        self.feed_input(self.crop_size, crop_size)
class SlicePythonOp(Pipeline):
    """Reference pipeline: decode LMDB images, then slice them in numpy.

    CPU-only counterpart of SlicePipeline, with the slice implemented by
    slice_func_helper through PythonFunction (exec_async/exec_pipelined off).
    """
    def __init__(self, batch_size, pos_size_iter,
                 num_threads=1, device_id=0, num_gpus=1,
                 axes=None, axis_names=None,
                 normalized_anchor=True, normalized_shape=True):
        super().__init__(batch_size, num_threads, device_id, seed=12345,
                         exec_async=False, exec_pipelined=False)
        self.device = "cpu"
        self.layout = "HWC"
        self.pos_size_iter = pos_size_iter
        # Fixed order so the compared pipeline sees the same images
        self.input = ops.readers.Caffe(path=caffe_db_folder, random_shuffle=False)
        self.decode = ops.decoders.Image(device="cpu", output_type=types.RGB)
        self.input_crop_pos = ops.ExternalSource()
        self.input_crop_size = ops.ExternalSource()
        function = partial(
            slice_func_helper, axes, axis_names, self.layout,
            normalized_anchor, normalized_shape)
        self.slice = ops.PythonFunction(function=function, output_layouts="HWC")
    def define_graph(self):
        imgs, _ = self.input()
        imgs = self.decode(imgs)
        self.crop_pos = self.input_crop_pos()
        self.crop_size = self.input_crop_size()
        out = self.slice(imgs, self.crop_pos, self.crop_size)
        return out
    def iter_setup(self):
        # Feed the next batch of slice arguments before each run
        (crop_pos, crop_size) = self.pos_size_iter.next()
        self.feed_input(self.crop_pos, crop_pos)
        self.feed_input(self.crop_size, crop_size)
def check_slice_synth_data_vs_numpy(device, batch_size,
                                    input_shape,
                                    layout, axes, axis_names,
                                    normalized_anchor, normalized_shape,
                                    input_type, output_type):
    """Compare DALI Slice with the numpy reference on synthetic data.

    A pair of identically-constructed data iterators and a pair of
    identically-seeded argument iterators are created, one of each per
    pipeline, so both pipelines consume the same inputs independently.
    """
    eiis = [RandomDataIterator(batch_size, shape=input_shape)
            for k in range(2)]
    eii_args = [SliceArgsIterator(batch_size, len(input_shape),
                                  image_shape=input_shape,
                                  image_layout=layout,
                                  axes=axes,
                                  axis_names=axis_names,
                                  normalized_anchor=normalized_anchor,
                                  normalized_shape=normalized_shape)
                for k in range(2)]
    compare_pipelines(
        SliceSynthDataPipeline(device, batch_size, layout,
                               iter(eiis[0]), iter(eii_args[0]),
                               axes=axes, axis_names=axis_names,
                               normalized_anchor=normalized_anchor,
                               normalized_shape=normalized_shape,
                               input_type=input_type, output_type=output_type),
        # Fix: the reference pipeline now consumes its own data iterator
        # (eiis[1], previously created but unused) instead of sharing
        # iter(eiis[0]) with the pipeline under test.
        SliceSynthDataPipelinePythonOp(batch_size, layout,
                                       iter(eiis[1]), iter(eii_args[1]),
                                       axes=axes, axis_names=axis_names,
                                       normalized_anchor=normalized_anchor,
                                       normalized_shape=normalized_shape,
                                       input_type=input_type, output_type=output_type),
        batch_size=batch_size, N_iterations=3)
def test_slice_synth_data_vs_numpy():
    """Slice vs numpy reference over layouts, axis specs and dtypes."""
    for device in ["cpu", "gpu"]:
        for batch_size in {1, 8}:
            for input_shape, layout, axes, axis_names, input_type, output_type in [
                ((200, 400, 3), "HWC", None, "WH", types.FLOAT, None),
                ((200, 400, 3), "HWC", None, "HW", types.FLOAT, None),
                ((200, 400, 3), "HWC", None, "HW", types.INT32, types.FLOAT),
                ((200, 400, 3), "HWC", None, "HW", types.INT64, types.UINT8),
                ((200, 400, 3), "HWC", None, "C", types.FLOAT, None),
                ((200, 400, 3), "HWC", (1, 0), None, types.FLOAT, types.FLOAT16),
                ((200, 400, 3), "HWC", (0, 1), None, types.FLOAT16, types.FLOAT16),
                ((200, 400, 3), "HWC", (2,), None, types.FLOAT, None),
                ((200,), "H", (0,), None, types.FLOAT, None),
                ((200,), "H", None, "H", types.FLOAT, None),
                ((200, 400), "HW", (1,), None, types.FLOAT, None),
                ((200, 400), "HW", None, "W", types.FLOAT, None),
                ((80, 30, 20, 3), "DHWC", (2, 1, 0), None, types.FLOAT, None),
                ((80, 30, 20, 3), "DHWC", (0, 1, 2), None, types.FLOAT, None),
                ((80, 30, 20, 3), "DHWC", (2, 1), None, types.FLOAT, None),
                ((80, 30, 20, 3), "DHWC", None, "WHD", types.FLOAT, None),
                ((80, 30, 20, 3), "DHWC", None, "DHW", types.FLOAT, None),
                ((80, 30, 20, 3), "DHWC", None, "WH", types.FLOAT, None),
                ((80, 30, 20, 3), "DHWC", None, "C", types.FLOAT, None)
            ]:
                # NOTE(review): np.random.choice is unseeded here, so which
                # normalized_anchor/normalized_shape combination gets covered
                # varies between runs — confirm this nondeterminism is intended.
                normalized_anchor = np.random.choice([True, False])
                normalized_shape = np.random.choice([True, False])
                yield (check_slice_synth_data_vs_numpy, device, batch_size,
                       input_shape, layout,
                       axes, axis_names,
                       normalized_anchor, normalized_shape,
                       input_type, output_type)
def check_slice_vs_fused_decoder(device, batch_size, axes, axis_names):
    """decoders.ImageSlice (fused) must match decoders.Image + Slice."""
    arg_iters = [SliceArgsIterator(batch_size, image_layout="HWC",
                                   axes=axes, axis_names=axis_names)
                 for _ in range(2)]
    separate = SlicePipeline(device, batch_size, iter(arg_iters[0]),
                             axes=axes, axis_names=axis_names, is_fused_decoder=False)
    fused = SlicePipeline(device, batch_size, iter(arg_iters[1]),
                          axes=axes, axis_names=axis_names, is_fused_decoder=True)
    compare_pipelines(separate, fused, batch_size=batch_size, N_iterations=3)
def test_slice_vs_fused_decoder():
    """Fused decode-and-slice vs separate decode + slice, various axis specs."""
    axis_configs = [(None, "WH"), (None, "HW"), ((1, 0), None), ((0, 1), None)]
    for device in ["cpu", "gpu"]:
        for batch_size in [1]:
            for axes, axis_names in axis_configs:
                yield check_slice_vs_fused_decoder, device, batch_size, axes, axis_names
def check_slice_vs_numpy(device, batch_size, axes, axis_names):
    """DALI Slice on decoded images must match the numpy-based reference."""
    arg_iters = [SliceArgsIterator(batch_size, image_layout="HWC",
                                   axes=axes, axis_names=axis_names)
                 for _ in range(2)]
    dali_pipe = SlicePipeline(device, batch_size, iter(arg_iters[0]),
                              axes=axes, axis_names=axis_names)
    ref_pipe = SlicePythonOp(batch_size, iter(arg_iters[1]),
                             axes=axes, axis_names=axis_names)
    compare_pipelines(dali_pipe, ref_pipe, batch_size=batch_size, N_iterations=3)
def test_slice_vs_numpy():
    """Slice vs numpy reference, for axes given by index and by name."""
    axis_configs = [(None, "WH"), (None, "HW"), ((1, 0), None), ((0, 1), None)]
    for device in ["cpu", "gpu"]:
        for batch_size in [1]:
            for axes, axis_names in axis_configs:
                yield check_slice_vs_numpy, device, batch_size, axes, axis_names
def check_slice_output(sample_in, sample_out,
                       anchor,
                       abs_slice_shape, abs_start, abs_end,
                       out_of_bounds_policy, fill_values,
                       naxes=2,
                       mean=None, std=None,
                       flip=None,
                       permute=None):
    """Verify a sliced (and optionally normalized/flipped/permuted) sample.

    Written for HWC inputs sliced on the first `naxes` (H, W) dimensions.

    Args:
        sample_in: original input sample (numpy array, HWC).
        sample_out: produced output sample.
        anchor: slice anchor (kept for context; not used by the checks).
        abs_slice_shape, abs_start, abs_end: absolute slice coordinates for
            the first `naxes` dimensions; mutated in place when the policy
            is "trim_to_shape".
        out_of_bounds_policy: "pad" or "trim_to_shape".
        fill_values: padding value(s) for the "pad" policy.
        naxes: number of sliced (leading) dimensions.
        mean, std: if both given, the in-bounds region is normalized.
        flip: optional per-dimension flip flags applied to the expectation.
        permute: optional output dimension permutation.
    """
    in_shape = sample_in.shape
    out_shape = sample_out.shape
    ndim = len(out_shape)
    orig_nchannels = in_shape[2]
    # Locate the channel dimension in the (possibly permuted) output
    out_ch_dim = permute.index(2) if permute is not None else 2
    out_nchannels = out_shape[out_ch_dim]
    if out_of_bounds_policy == "pad":
        # Padded output keeps the full requested window extents
        if permute is not None:
            assert all([abs_slice_shape[permute[i]] == out_shape[i]
                        for i in range(ndim) if permute[i] < naxes])
        else:
            assert all([abs_slice_shape[i] == out_shape[i]
                        for i in range(naxes)])
    elif out_of_bounds_policy == "trim_to_shape":
        # Trimmed output is clamped to the input bounds; clamp the expected
        # coordinates the same way
        assert all([out_shape[i] <= in_shape[i] for i in range(naxes)])
        for i in range(naxes):
            if abs_start[i] < 0:
                abs_start[i] = 0
            if abs_end[i] > in_shape[i]:
                abs_end[i] = in_shape[i]
            abs_slice_shape[i] = abs_end[i] - abs_start[i]
        if permute is not None:
            assert all([abs_slice_shape[permute[i]] == out_shape[i]
                        for i in range(ndim) if permute[i] < naxes])
        else:
            assert all([abs_slice_shape[i] == out_shape[i]
                        for i in range(naxes)])
    else:
        raise ValueError(f"Wrong out_of_bounds_policy: {out_of_bounds_policy}")
    # Split the window into out-of-bounds padding (before/after) and the
    # in-bounds sliced region
    pad_before = [-abs_start[i] if abs_start[i] < 0 else 0
                  for i in range(naxes)]
    pad_after = [abs_end[i] - in_shape[i] if in_shape[i] < abs_end[i] else 0
                 for i in range(naxes)]
    sliced = [abs_slice_shape[i] - pad_before[i] - pad_after[i]
              for i in range(naxes)]
    if out_of_bounds_policy == "trim_to_shape":
        assert all([pad_before[i] == 0 for i in range(naxes)])
        assert all([pad_after[i] == 0 for i in range(naxes)])
        if permute is not None:
            assert all([sliced[permute[i]] == out_shape[i]
                        for i in range(ndim) if permute[i] < naxes])
        else:
            assert all([sliced[i] == out_shape[i] for i in range(naxes)])
    pos_start = [abs_start[i] if abs_start[i] >= 0 else 0 for i in range(naxes)]
    in_sliced = sample_in[pos_start[0] : pos_start[0] + sliced[0],  # noqa:E203
                          pos_start[1] : pos_start[1] + sliced[1], :]  # noqa:E203
    # Build the expected output: fill value everywhere, then paste the
    # (optionally normalized) in-bounds region at the padded offset
    slice_shape = (abs_slice_shape[0], abs_slice_shape[1], out_nchannels)
    expected = np.zeros(slice_shape, dtype=np.float32)
    expected[:, :, :orig_nchannels] = np.full((slice_shape[0], slice_shape[1], orig_nchannels),
                                              fill_values)
    should_normalize = mean is not None and std is not None
    expected[pad_before[0] : pad_before[0] + sliced[0],  # noqa:E203
             pad_before[1] : pad_before[1] + sliced[1],  # noqa:E203
             : orig_nchannels] = (in_sliced - mean) / std if should_normalize else in_sliced
    if flip is not None:
        for d in range(len(flip)):
            if flip[d]:
                expected = np.flip(expected, d)
    if permute is not None:
        expected = np.transpose(expected, permute)
    np.testing.assert_allclose(sample_out, expected, atol=1e-07)
def check_slice_with_out_of_bounds_policy_support(device,
                                                  batch_size,
                                                  input_shape=(100, 200, 3),
                                                  out_of_bounds_policy=None,
                                                  fill_values=(0x76, 0xb9, 0x00),
                                                  normalized_anchor=False,
                                                  normalized_shape=False):
    """Run Slice with windows guaranteed to exceed the input bounds and check
    the output against check_slice_output for the requested policy."""
    # This test case is written with HWC layout in mind and "HW" axes in slice arguments
    axis_names = "HW"
    axes = None
    layout = "HWC"
    assert len(input_shape) == 3
    if fill_values is not None and len(fill_values) > 1:
        assert input_shape[2] == len(fill_values)
    eii = RandomDataIterator(batch_size, shape=input_shape)
    # Negative anchors and shapes > 1 (normalized) force out-of-bounds windows
    eii_arg = SliceArgsIterator(batch_size, len(input_shape),
                                image_shape=input_shape,
                                image_layout=layout,
                                axes=axes, axis_names=axis_names,
                                normalized_anchor=normalized_anchor,
                                normalized_shape=normalized_shape,
                                min_norm_anchor=-0.5, max_norm_anchor=-0.1,
                                min_norm_shape=1.1, max_norm_shape=3.6)
    pipe = SliceSynthDataPipeline(device, batch_size, layout, iter(eii), iter(eii_arg),
                                  axes=axes, axis_names=axis_names,
                                  normalized_anchor=normalized_anchor,
                                  normalized_shape=normalized_shape,
                                  out_of_bounds_policy=out_of_bounds_policy,
                                  fill_values=fill_values,
                                  extra_outputs=True)
    if fill_values is None:
        fill_values = 0
    pipe.build()
    for _ in range(3):
        outs = pipe.run()
        out, in_data, anchor_data, shape_data = outs
        assert batch_size == len(out)
        for idx in range(batch_size):
            sample_in = as_array(in_data[idx])
            sample_out = as_array(out[idx])
            anchor = as_array(anchor_data[idx])
            shape = as_array(shape_data[idx])
            in_shape = sample_in.shape
            # Reconstruct the absolute window to validate the output against
            abs_start, abs_end, abs_slice_shape = abs_slice_start_and_end(
                in_shape[:2], anchor, shape, normalized_anchor, normalized_shape)
            check_slice_output(sample_in, sample_out, anchor, abs_slice_shape,
                               abs_start, abs_end, out_of_bounds_policy, fill_values)
def test_slice_with_out_of_bounds_policy_support():
    """'pad' and 'trim_to_shape' policies with absolute and normalized args."""
    shape = (40, 80, 3)
    for policy in ["pad", "trim_to_shape"]:
        for device in ["gpu", "cpu"]:
            for batch_size in [1, 3]:
                for norm_anchor, norm_shape in [(False, False), (True, True)]:
                    for fill_values in [None, (0x76, 0xb0, 0x00)]:
                        yield (check_slice_with_out_of_bounds_policy_support,
                               device, batch_size, shape, policy, fill_values,
                               norm_anchor, norm_shape)
def check_slice_with_out_of_bounds_error(device, batch_size, input_shape=(100, 200, 3),
                                         normalized_anchor=False, normalized_shape=False):
    """Slice with out_of_bounds_policy='error' must raise when the window
    lies (partially) outside the input.

    This test case is written with HWC layout in mind and "HW" axes in slice arguments.
    """
    assert len(input_shape) == 3
    layout = "HWC"
    data_iter = RandomDataIterator(batch_size, shape=input_shape)
    # Negative anchors and shapes > 1 (normalized) force out-of-bounds windows
    args_iter = SliceArgsIterator(batch_size, len(input_shape), image_shape=input_shape,
                                  image_layout=layout, axes=None, axis_names="HW",
                                  normalized_anchor=normalized_anchor,
                                  normalized_shape=normalized_shape,
                                  min_norm_anchor=-0.5, max_norm_anchor=-0.1,
                                  min_norm_shape=1.1, max_norm_shape=3.6)
    pipe = SliceSynthDataPipeline(device, batch_size, layout,
                                  iter(data_iter), iter(args_iter),
                                  axes=None, axis_names="HW",
                                  normalized_anchor=normalized_anchor,
                                  normalized_shape=normalized_shape,
                                  out_of_bounds_policy="error")
    pipe.build()
    with assert_raises(RuntimeError,
                       glob="Slice can't be placed out of bounds with current policy. Got:"):
        _ = pipe.run()
def test_slice_with_out_of_bounds_error():
    """The 'error' policy raises for out-of-bounds windows on both backends."""
    shape = (40, 80, 3)
    for device in ["gpu", "cpu"]:
        for batch_size in [1, 3]:
            for norm_anchor, norm_shape in [(False, False), (True, True)]:
                yield (check_slice_with_out_of_bounds_error,
                       device, batch_size,
                       shape, norm_anchor, norm_shape)
def check_slice_named_args(device, batch_size):
    """All equivalent fn.slice argument spellings must produce equal outputs.

    Builds the same slice via positional args, start/shape, start/end,
    rel_start/rel_shape, rel_start/rel_end and mixed rel_start/shape, with
    each argument given both as a numpy array and as a plain list, then
    compares every output against the first (positional) one.
    """
    test_data_shape = [5, 4, 3]
    def get_data():
        out = [np.random.randint(0, 255, size=test_data_shape, dtype=np.uint8)
               for _ in range(batch_size)]
        return out
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    with pipe:
        data = fn.external_source(source=get_data, layout="HWC")
        in_shape_list = [5, 4]
        start_list = [1, 2]
        shape_list = [3, 1]
        # Same values as arrays and as lists, absolute and relative
        in_shape = np.array(in_shape_list)
        start = np.array(start_list)
        shape = np.array(shape_list)
        end_list = [start_list[i] + shape_list[i] for i in range(2)]
        end = start + shape
        rel_start_list = [start_list[i] / in_shape_list[i] for i in range(2)]
        rel_start = start / in_shape
        rel_shape_list = [shape_list[i] / in_shape_list[i] for i in range(2)]
        rel_shape = shape / in_shape
        rel_end_list = [end_list[i] / in_shape_list[i] for i in range(2)]
        rel_end = end / in_shape
        # Reference outputs: positional anchor/shape arguments
        outs = [
            fn.slice(data, start, shape, axes=(0, 1)),
            fn.slice(data, rel_start, rel_shape, axes=(0, 1)),
        ]
        for start_arg in [start, start_list]:
            for shape_arg in [shape, shape_list]:
                outs += [
                    fn.slice(data, start=start_arg, shape=shape_arg, axes=(0, 1))
                ]
            for end_arg in [end, end_list]:
                outs += [
                    fn.slice(data, start=start_arg, end=end_arg, axes=(0, 1))
                ]
        for rel_start_arg in [rel_start, rel_start_list]:
            for rel_shape_arg in [rel_shape, rel_shape_list]:
                outs += [
                    fn.slice(data, rel_start=rel_start_arg, rel_shape=rel_shape_arg, axes=(0, 1))
                ]
            for rel_end_arg in [rel_end, rel_end_list]:
                outs += [
                    fn.slice(data, rel_start=rel_start_arg, rel_end=rel_end_arg, axes=(0, 1))
                ]
            for shape_arg in [shape, shape_list]:
                outs += [
                    fn.slice(data, rel_start=rel_start_arg, shape=shape_arg, axes=(0, 1))
                ]
        pipe.set_outputs(*outs)
    pipe.build()
    for _ in range(3):
        outs = pipe.run()
        # Every variant must match the first (positional-argument) output
        for out_idx in range(1, len(outs)):
            for sample in range(batch_size):
                np.testing.assert_equal(np.array(outs[0][sample]), np.array(outs[out_idx][sample]))
def test_slice_named_args():
    """Exercise every named-argument spelling of fn.slice on both backends."""
    for device in ("cpu", "gpu"):
        yield check_slice_named_args, device, 3
def check_slice_named_args_default_start_or_end(device, batch_size):
    """Check fn.slice named-argument defaults: an omitted ``start`` defaults to
    the origin and an omitted ``end`` defaults to the input extent, so calls
    with only one of the two arguments match the equivalent explicit calls.
    """
    test_data_shape = [5, 4, 3]

    def get_data():
        # Fixed-shape random uint8 HWC batch.
        out = [np.random.randint(0, 255, size=test_data_shape, dtype=np.uint8)
               for _ in range(batch_size)]
        return out
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    with pipe:
        data = fn.external_source(source=get_data, layout="HWC")
        in_shape = np.array([5, 4])
        start = np.array([1, 2])
        shape = np.array([3, 1])
        end = start + shape
        # Pairs: (explicit end == input shape, start only) and (explicit zero start, end only).
        outs = [
            fn.slice(data, start=start, end=in_shape, axes=(0, 1)),
            fn.slice(data, start=[0, 0], end=end, axes=(0, 1)),
            fn.slice(data, start=start, axes=(0, 1)),
            fn.slice(data, end=end, axes=(0, 1)),
        ]
    pipe.set_outputs(*outs)
    pipe.build()
    for _ in range(3):
        outs = pipe.run()
        for sample in range(batch_size):
            # Defaulted end == explicit end at input extent; defaulted start == explicit [0, 0].
            np.testing.assert_equal(np.array(outs[0][sample]), np.array(outs[2][sample]))
            np.testing.assert_equal(np.array(outs[1][sample]), np.array(outs[3][sample]))
def test_slice_named_default_start_or_end_args():
    """Defaulted start/end named arguments, on both backends."""
    for device in ("cpu", "gpu"):
        yield check_slice_named_args_default_start_or_end, device, 3
def check_slice_named_args_errors(device, batch_size):
    """Check that combining positional anchor/shape with the mutually exclusive
    named arguments (start/end/shape) makes pipeline construction fail.
    """
    test_data_shape = [5, 4, 3]

    def get_data():
        out = [np.random.randint(0, 255, size=test_data_shape, dtype=np.uint8)
               for _ in range(batch_size)]
        return out
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)
    with pipe:
        data = fn.external_source(source=get_data, layout="HWC")
        start = np.array([1, 2])
        shape = np.array([3, 1])
        # Positional anchor/shape combined with conflicting named arguments.
        outs = [
            fn.slice(data, start, shape, start=start, end=start + shape, shape=shape, axes=(0, 1)),
        ]
    pipe.set_outputs(*outs)
    with assert_raises(
            RuntimeError,
            glob='"end", "rel_end", "shape", and "rel_shape" arguments are mutually exclusive'):
        pipe.build()
        for _ in range(1):
            outs = pipe.run()
def test_slice_named_args_errors():
    """Conflicting positional + named slice arguments must be rejected on both backends."""
    for device in ("cpu", "gpu"):
        yield check_slice_named_args_errors, device, 1
def check_no_slice(device, dtype, batch_size, num_threads):
    """Check that slices covering the whole input (all channels, and relative
    range [0, 1] on every axis) return the input unchanged.
    """
    @pipeline_def(batch_size=batch_size, num_threads=num_threads, device_id=0)
    def make_pipe():
        encoded, _ = fn.readers.caffe(path=caffe_db_folder, random_shuffle=False)
        image = fn.decoders.image(encoded, device="cpu", output_type=types.RGB)
        if device == "gpu":
            image = image.gpu()
        image = fn.cast(image, dtype=dtype)
        # Both slices span the full input: all 3 channels / full relative range.
        sliced1 = fn.slice(image, 0, 3, axes=(2,))
        sliced2 = fn.slice(image, rel_start=(0, 0, 0), rel_end=(1, 1, 1), axis_names="HWC")
        return image, sliced1, sliced2
    pipe = make_pipe()
    pipe.build()
    for _ in range(3):
        outs = pipe.run()
        nouts = len(outs)
        # Only the first sample of each output is compared against the input image.
        in_img = as_array(outs[0][0])
        for out_idx in range(1, nouts):
            out_img = as_array(outs[out_idx][0])
            np.testing.assert_array_equal(in_img, out_img)
def test_no_slice():
    """Full-range slices should be a no-op for every backend/dtype combination."""
    for device in ("cpu", "gpu"):
        for dtype in (types.UINT8, types.UINT16, types.FLOAT):
            yield check_no_slice, device, dtype, 4, 3
def check_rel_start_rel_shape(device, batch_size, num_threads,
                              get_dynamic_axes=None, args_device="cpu"):
    """Check fn.slice with relative start/shape tensor arguments against a
    numpy reference slice rebuilt from the same argument values.

    If ``get_dynamic_axes`` is given it must be an external_source callback
    producing (axes, rel_start, rel_shape); otherwise axes are fixed to (0, 1)
    and the relative arguments are drawn by fn.random.uniform on args_device.
    """
    image_gen = generator_random_data(
        batch_size, min_sh=(10, 10, 3), max_sh=(100, 100, 3),
        dtype=np.float32, val_range=[0.0, 1.0])
    # Args GPU only possible with GPU backend
    assert args_device == device or device == "gpu"

    @pipeline_def(batch_size=batch_size, num_threads=num_threads, device_id=0)
    def make_pipe():
        image = fn.external_source(source=image_gen)
        if device == "gpu":
            image = image.gpu()
        if get_dynamic_axes:
            axes, rel_start, rel_shape = fn.external_source(source=get_dynamic_axes, num_outputs=3)
        else:
            axes = types.Constant(np.array([0, 1], dtype=np.int32), device="cpu")
            rel_start = fn.random.uniform(range=(0.1, 0.2), shape=(2,), dtype=types.FLOAT,
                                          device=args_device)
            rel_shape = fn.random.uniform(range=(0.4, 0.6), shape=(2,), dtype=types.FLOAT,
                                          device=args_device)
        if args_device == "gpu":
            # GPU-resident arguments can only be passed positionally.
            sliced = fn.slice(image, rel_start, rel_shape, axes=axes)
            return image, axes, rel_start, rel_shape, sliced
        else:
            sliced1 = fn.slice(image, rel_start=rel_start, rel_shape=rel_shape, axes=axes)
            sliced2 = fn.slice(image, rel_start, rel_shape, axes=axes)
            return image, axes, rel_start, rel_shape, sliced1, sliced2
    pipe = make_pipe()
    pipe.build()
    ndim = 3
    for _ in range(3):
        outs = pipe.run()
        for sample_idx in range(batch_size):
            in_img = as_array(outs[0][sample_idx])
            axes = as_array(outs[1][sample_idx])
            naxes = axes.shape[0]
            if naxes == 0:  # Empty axes mean "all axes"
                axes = np.array(range(ndim), dtype=np.int32)
            rel_start = as_array(outs[2][sample_idx])
            rel_shape = as_array(outs[3][sample_idx])
            # Translate relative (start, shape) into an absolute [start, end) per axis.
            start = np.zeros([ndim], dtype=np.int32)
            end = np.array([in_img.shape[i] for i in range(ndim)], dtype=np.int32)
            for i in range(len(axes)):
                a = axes[i]
                assert a >= -ndim and a <= (ndim - 1)
                start[a] = roundint(rel_start[i] * in_img.shape[a])
                end[a] = roundint((rel_start[i] + rel_shape[i]) * in_img.shape[a])
            ref_sliced = in_img[start[0]:end[0], start[1]:end[1], start[2]:end[2]]
            # With GPU arguments we don't test named arguments
            for out_idx in range(2 if args_device == "cpu" else 1):
                sliced = as_array(outs[4 + out_idx][sample_idx])
                np.testing.assert_allclose(ref_sliced, sliced)
def check_dynamic_axes(device, batch_size, num_threads, use_negative, use_empty):
    """Run check_rel_start_rel_shape with per-sample axes generated at run
    time, optionally including negative axis indices or empty axes lists.
    """
    get_dynamic_axes = generator_random_axes_for_3d_input(
        batch_size, use_negative=use_negative, use_empty=use_empty,
        extra_out_desc=[
            (0.0, 0.2, np.float32),  # rel_start
            (0.4, 0.6, np.float32)   # rel_shape
        ])
    check_rel_start_rel_shape(device, batch_size, num_threads,
                              get_dynamic_axes=get_dynamic_axes, args_device="cpu")
def test_dynamic_axes():
    """Per-sample run-time axes: no negative indices, no empty axes lists."""
    for device in ("cpu", "gpu"):
        yield check_dynamic_axes, device, 10, 3, False, False
def test_negative_axes():
    """Per-sample run-time axes including negative indices."""
    for device in ("cpu", "gpu"):
        yield check_dynamic_axes, device, 10, 3, True, False
def test_empty_axes():
    """Per-sample run-time axes including empty lists (meaning "all axes")."""
    for device in ("cpu", "gpu"):
        yield check_dynamic_axes, device, 10, 3, False, True
def check_wrong_axes(device, wrong_axes_range=None, named_args=False):
    """Check that axis indices outside [-3, 2] for a 3D input make fn.slice
    raise at run time, for both positional and named argument styles.
    """
    @pipeline_def(batch_size=1, num_threads=1, device_id=0)
    def make_pipe():
        fake_data = fn.constant(idata=0, shape=[10, 10, 3], dtype=types.FLOAT, device=device)
        # Axes drawn from a range that is entirely out of bounds for 3D data.
        axes = fn.random.uniform(range=wrong_axes_range, shape=(2,), dtype=types.INT32)
        rel_start = fn.random.uniform(range=[0.0, 0.3], shape=(2,), dtype=types.FLOAT)
        rel_shape = fn.random.uniform(range=[0.4, 0.6], shape=(2,), dtype=types.FLOAT)
        if named_args:
            sliced = fn.slice(fake_data, rel_start=rel_start, rel_shape=rel_shape, axes=axes)
        else:
            sliced = fn.slice(fake_data, rel_start, rel_shape, axes=axes)
        return sliced
    p = make_pipe()
    p.build()
    # Note: [[] and []] are '[' and ']' characters.
    assert_raises(RuntimeError, p.run,
                  glob="Axis * out of range. Expected range is [[]-3, 2[]] for a 3D input")
def test_wrong_axes():
    """Axes outside [-3, 2] for 3D data must raise, for both calling conventions."""
    out_of_range = ((-10, -4), (3, 10))
    for device in ("cpu", "gpu"):
        for axes_range in out_of_range:
            for named_args in (False, True):
                yield check_wrong_axes, device, axes_range, named_args
def check_scalar(device):
    """Check fn.slice with scalar-constant anchor and shape on a 1D input
    against numpy slicing with the same values.
    """
    batch_size = 5

    def get_data():
        out = [np.random.ranf(size=[1000]).astype(dtype=np.single) for _ in range(batch_size)]
        return out

    @pipeline_def(batch_size=batch_size, num_threads=1, device_id=0)
    def test_pipe():
        data = fn.external_source(source=get_data)
        shape = types.ScalarConstant(10)
        anchor = types.ScalarConstant(5)
        if device != "cpu":
            data = data.gpu()
        sliced = fn.slice(data, start=anchor, shape=shape, axes=[0], device=device)
        return data, sliced, shape, anchor
    pipe = test_pipe()
    pipe.build()
    # Outputs are (input, sliced, shape, anchor); the unsliced input is the reference.
    ref, data, shape, anchor = pipe.run()
    for sample_idx in range(batch_size):
        d = as_array(data[sample_idx])
        r = as_array(ref[sample_idx])
        s = as_array(shape[sample_idx])
        a = as_array(anchor[sample_idx])
        np.testing.assert_allclose(d, r[a:a + s])
def test_scalar():
    """Scalar anchor/shape arguments, both backends."""
    for device in ("cpu", "gpu"):
        yield check_scalar, device
def test_gpu_args():
    """Slice with GPU-resident argument inputs (only valid with the GPU backend)."""
    check_rel_start_rel_shape("gpu", 10, 3, args_device="gpu")
def test_wrong_arg_backend():
    """A CPU operator must reject GPU data passed as a positional input."""
    @pipeline_def(batch_size=1, num_threads=1, device_id=0)
    def make_pipe():
        fake_data = fn.constant(idata=0, shape=[10, 10, 3], dtype=types.FLOAT, device="cpu")
        # GPU-resident arguments fed positionally into a CPU slice operator.
        rel_start = fn.random.uniform(range=[0.0, 0.3], shape=(2,), dtype=types.FLOAT, device="gpu")
        rel_shape = fn.random.uniform(range=[0.4, 0.6], shape=(2,), dtype=types.FLOAT, device="gpu")
        sliced = fn.slice(fake_data, rel_start, rel_shape, device="cpu")
        return sliced
    with assert_raises(ValueError, glob="An operator with device='cpu' cannot accept GPU inputs"):
        p = make_pipe()
        p.build()
        p.run()
def test_wrong_backend_named_args():
    """Named (keyword) argument inputs must be CPU data nodes; GPU argument
    inputs passed by name must be rejected."""
    @pipeline_def(batch_size=1, num_threads=1, device_id=0)
    def make_pipe():
        fake_data = fn.constant(idata=0, shape=[10, 10, 3], dtype=types.FLOAT, device="cpu")
        # GPU-resident arguments passed as named arguments.
        rel_start = fn.random.uniform(range=[0.0, 0.3], shape=(2,), dtype=types.FLOAT, device="gpu")
        rel_shape = fn.random.uniform(range=[0.4, 0.6], shape=(2,), dtype=types.FLOAT, device="gpu")
        sliced = fn.slice(fake_data, rel_start=rel_start, rel_shape=rel_shape, device="cpu")
        return sliced
    with assert_raises(RuntimeError,
                       glob="Named arguments inputs to operators must be CPU data nodes"):
        p = make_pipe()
        p.build()
        p.run()
|
DALI-main
|
dali/test/python/operator_1/test_slice.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import os
from test_utils import get_dali_extra_path, check_batch
test_data_root = get_dali_extra_path()
images_dir = os.path.join(test_data_root, 'db', 'single', 'jpeg')
@pipeline_def
def pipe_gaussian_noise(mean, stddev, variable_dist_params, device=None):
    """Build a pipeline returning (fn.noise.gaussian output, input +
    fn.random.normal output) computed with the same seed and distribution
    parameters, so the two outputs should match.

    If ``variable_dist_params`` is True, the mean/stddev arguments are ignored
    and per-sample random distribution parameters are used instead.
    """
    encoded, _ = fn.readers.file(file_root=images_dir)
    in_data = fn.cast(
        fn.decoders.image(encoded, device="cpu", output_type=types.RGB),
        dtype=types.FLOAT
    )
    if device == 'gpu':
        in_data = in_data.gpu()
    mean_arg = mean
    stddev_arg = stddev
    if variable_dist_params:
        mean_arg = fn.random.uniform(range=(-50.0, 50.0))
        stddev_arg = fn.random.uniform(range=(1.0, 10.0))
    # Identical seed so both operators draw the same noise.
    seed = 12345
    out_data1 = fn.noise.gaussian(in_data, mean=mean_arg, stddev=stddev_arg, seed=seed)
    out_data2 = in_data + fn.random.normal(in_data, mean=mean_arg, stddev=stddev_arg, seed=seed)
    return out_data1, out_data2
def _testimpl_operator_noise_gaussian_vs_add_normal_dist(device, mean, stddev, variable_dist_params,
                                                         batch_size, niter):
    """Check that fn.noise.gaussian equals adding fn.random.normal noise with
    identical seed and parameters, within a small tolerance."""
    pipe = pipe_gaussian_noise(mean, stddev, variable_dist_params,
                               device=device, batch_size=batch_size, num_threads=3, device_id=0)
    pipe.build()
    for _ in range(niter):
        out0, out1 = pipe.run()
        check_batch(out0, out1, batch_size=batch_size, eps=0.1)
def test_operator_noise_gaussian_vs_add_normal_dist():
    """fn.noise.gaussian must match adding fn.random.normal with the same seed."""
    niter = 3
    dist_cases = ((10.0, 57.0, False), (0.0, 0.0, True))
    for device in ("cpu", "gpu"):
        for batch_size in (1, 3):
            for mean, stddev, variable_dist_params in dist_cases:
                yield (_testimpl_operator_noise_gaussian_vs_add_normal_dist,
                       device, mean, stddev, variable_dist_params, batch_size, niter)
|
DALI-main
|
dali/test/python/operator_1/test_noise_gaussian.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import pipeline_def
import nvidia.dali.types as types
import nvidia.dali.fn as fn
import numpy as np
import cv2
from scipy.ndimage import convolve1d, filters as sp_filters
import os
from nose_utils import assert_raises
from nose.plugins.attrib import attr
from sequences_test_utils import video_suite_helper, ArgCb
from test_utils import get_dali_extra_path, check_batch, RandomlyShapedDataIterator
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
test_iters = 4
min_window_size = 3
max_window_size = 31 # it is maximal window size supported by opencv
shape_layout_axes_cases = [((20, 20, 30, 3), "DHWC", 3), ((20, 20, 30), "", 3),
((20, 30, 3), "HWC", 2), ((20, 30), "HW", 2),
((3, 30, 20), "CWH", 2), ((5, 20, 30, 3), "FHWC", 2),
((5, 10, 10, 7, 3), "FDHWC", 3),
((5, 3, 20, 30), "FCHW", 2),
((3, 5, 10, 10, 7), "FCDHW", 3)]
def to_batch(tl, batch_size):
    """Convert the first batch_size samples of a TensorList-like object to a
    list of numpy arrays."""
    batch = []
    for idx in range(batch_size):
        batch.append(np.array(tl[idx]))
    return batch
# Simple check if laplacian of a square matrix that has zeros everywhere except
# the middle cell, gives sum of separable convolution kernels used to compute
# the partial derivatives, i.e. sum of the products of 1D convolution windows.
def _test_kernels(device, num_dims, smoothing, normalize):
    """Check DALI's Laplacian kernels by applying the operator to one-hot
    "impulse" inputs: the response then equals the kernel itself, which is
    compared against a sum of outer products of OpenCV's 1D derivative and
    smoothing windows.
    """
    # One sample per odd window size in [min_window_size, max_window_size].
    batch_size = (max_window_size + 2 - min_window_size) // 2

    def get_inputs():
        ones = []
        window_sizes = []
        smoothing_sizes = []
        scales = []
        padding = 2
        for win_size in range(min_window_size, max_window_size + 2, 2):
            # Impulse input: a single 1 in the middle of a zero array.
            a_size = win_size + padding
            a = np.zeros((a_size,) * num_dims, dtype=np.float32)
            a[(a_size // 2,) * num_dims] = 1
            ones.append(a)
            window_sizes.append(np.array(win_size, dtype=np.int32))
            if smoothing:
                smoothing_sizes.append(np.array(win_size, dtype=np.int32))
                exponent = num_dims * win_size - 2 - num_dims
            else:
                smoothing_sizes.append(np.array(1, dtype=np.int32))
                exponent = win_size - 3
            # Normalization factor matching the kernel's magnitude growth.
            scales.append(np.array(2.**(-exponent), dtype=np.float32))
        return ones, window_sizes, smoothing_sizes, scales

    @pipeline_def
    def pipeline():
        ones, window_sizes, smoothing_sizes, scales = fn.external_source(
            get_inputs, num_outputs=4)
        if device == "gpu":
            ones = ones.gpu()
        kernels = fn.laplacian(
            ones, window_size=window_sizes, smoothing_size=smoothing_sizes,
            dtype=types.FLOAT, normalized_kernel=normalize, device=device)
        return kernels, scales

    def outer(*vs):
        # Outer product of any number of 1D vectors, shaped to one axis per vector.
        acc = np.array([1.])
        for v in vs:
            acc = np.outer(acc, v)
        return acc.reshape(tuple(len(v) for v in vs))

    def get_cv2_kernel(win_size, smoothing):
        # Baseline kernel: sum over partial derivatives of outer(deriv, smooth, ...).
        d, s = cv2.getDerivKernels(2, 0, win_size)
        if not smoothing:
            # Identity "smoothing" window when smoothing is disabled.
            s = np.zeros(win_size)
            s[win_size // 2] = 1.
        windows = [[d if i == j else s for j in range(
            num_dims)] for i in range(num_dims)]
        return sum(outer(*ws) for ws in windows)
    pipe = pipeline(num_threads=4, batch_size=batch_size, device_id=0)
    pipe.build()
    (kernels, scales) = pipe.run()
    if device == "gpu":
        kernels = kernels.as_cpu()
    # Trim the zero padding so the arrays match the baseline kernel extents.
    kernels = [np.array(ker)[(slice(1, -1),) * num_dims] for ker in kernels]
    scales = [np.array(sf).item() for sf in scales]
    win_sizes = range(min_window_size, max_window_size + 2, 2)
    assert len(kernels) == len(win_sizes) == len(scales)
    baseline_kernels = [
        get_cv2_kernel(win_size, smoothing) * scale
        for win_size, scale in zip(win_sizes, scales)]
    if not normalize:  # output was not normalized by the op
        kernels = [kernel * scale for kernel, scale in zip(kernels, scales)]
    # NOTE(review): expected_layout="HWC" is passed even for 1D/3D kernels —
    # presumably check_batch only applies it when the rank matches; confirm.
    check_batch(kernels, baseline_kernels, batch_size,
                max_allowed_error=1e-5, expected_layout="HWC")
def test_kernels():
    """Sweep backend, dimensionality, normalization and smoothing options."""
    flag_pairs = [(n, s) for n in (True, False) for s in (True, False)]
    for device in ("cpu", "gpu"):
        for num_dims in range(1, 4):
            for normalize, smoothing in flag_pairs:
                yield _test_kernels, device, num_dims, smoothing, normalize
@pipeline_def
def laplacian_pipe(device, window_size, in_type, out_type, normalize, grayscale):
    """Decode images and return (fn.laplacian(images), images) so the caller
    can compare against the OpenCV baseline."""
    # use OpenCV convention - window size 1 implies deriv kernel of size 3 and no smoothing
    if window_size == 1:
        window_size, smoothing_size = 3, 1
    else:
        smoothing_size = None
    imgs, _ = fn.readers.file(file_root=images_dir, shard_id=0, num_shards=1)
    output_type = types.GRAY if grayscale else types.RGB
    imgs = fn.decoders.image(imgs, device="cpu", output_type=output_type)
    if in_type != types.UINT8:
        imgs = fn.cast(imgs, dtype=in_type)
    if device == "gpu":
        imgs = imgs.gpu()
    if out_type == in_type:
        # dtype=None keeps the input type, which is what "same type" means here.
        out_type = None
    edges = fn.laplacian(imgs, window_size=window_size, smoothing_size=smoothing_size,
                         normalized_kernel=normalize, dtype=out_type, device=device)
    return edges, imgs
def laplacian_cv(imgs, window_size, in_type, out_type, scale, grayscale):
    """Reference Laplacian computed per image with OpenCV, using the same
    border handling as DALI; grayscale results get an explicit channel dim."""
    uint8_out = out_type == types.UINT8 or (out_type is None and in_type == types.UINT8)
    ddepth = cv2.CV_8U if uint8_out else cv2.CV_32F
    result = []
    for img in imgs:
        edges = cv2.Laplacian(img, ddepth=ddepth, ksize=window_size,
                              borderType=cv2.BORDER_REFLECT_101, scale=scale)
        if grayscale:
            # cv2 drops the trailing channel dim for single-channel images.
            edges = np.expand_dims(edges, axis=2)
        result.append(edges)
    return result
def normalization_factor(window_size):
    """Scale factor 2**-(2*window_size - 4) that normalizes a Laplacian kernel.

    window_size == 1 follows the OpenCV convention (plain size-3 derivative
    kernel, no smoothing) and needs no scaling.
    """
    if window_size == 1:
        return 1.0
    return 2.0 ** (4 - 2 * window_size)
def _test_vs_open_cv(device, batch_size, window_size, in_type, out_type, normalize, grayscale):
    """Compare fn.laplacian output against cv2.Laplacian on decoded images."""
    pipe = laplacian_pipe(
        device_id=0, device=device, num_threads=4, batch_size=batch_size,
        window_size=window_size, in_type=in_type, out_type=out_type,
        normalize=normalize, grayscale=grayscale)
    pipe.build()
    norm_factor = normalization_factor(window_size)
    # When DALI normalizes, cv2 gets the same factor via its `scale` argument.
    scale = 1 if not normalize else norm_factor
    for _ in range(test_iters):
        edges, imgs = pipe.run()
        if device == "gpu":
            edges = edges.as_cpu()
            imgs = imgs.as_cpu()
        imgs = to_batch(imgs, batch_size)
        baseline_cv = laplacian_cv(
            imgs, window_size, in_type, out_type, scale, grayscale)
        edges = to_batch(edges, batch_size)
        actual_out_type = out_type if out_type is not None else in_type
        assert len(edges) == len(baseline_cv)
        if actual_out_type == types.FLOAT:
            max_error = 1e-7 if window_size <= 11 else 1e-4
        else:
            max_error = 1
        # values in the array raise exponentially with the window_size, so without normalization
        # the absolute error will also be big - normalize the values before the comparison
        if not normalize:
            edges = [a * norm_factor for a in edges]
            baseline_cv = [a * norm_factor for a in baseline_cv]
        check_batch(edges, baseline_cv, batch_size,
                    max_allowed_error=max_error, expected_layout="HWC")
def test_vs_open_cv():
    """Compare uint8 Laplacian against OpenCV for small window sizes."""
    batch_size = 10
    # normalize/grayscale are independent; two combinations keep the case count down.
    # Windows stay small: for bigger windows in uint8 mode cv2 appears to use an
    # integral type that saturates earlier than a float path would.
    for device in ("cpu", "gpu"):
        for normalize, grayscale in ((True, False), (False, True)):
            for window_size in range(1, 13, 2):
                yield (_test_vs_open_cv, device, batch_size, window_size, types.UINT8,
                       None, normalize, grayscale)
@attr('slow')
def slow_test_vs_open_cv():
    """Extended OpenCV comparison: float in/out types, full window-size range."""
    batch_size = 10
    # normalize/grayscale are independent; two combinations keep the case count down.
    type_pairs = ((types.UINT8, types.FLOAT), (types.FLOAT, None))
    for device in ("cpu", "gpu"):
        for normalize, grayscale in ((True, False), (False, True)):
            for in_type, out_type in type_pairs:
                for window_size in [1] + list(range(3, max_window_size + 2, 4)):
                    yield (_test_vs_open_cv, device, batch_size, window_size, in_type,
                           out_type, normalize, grayscale)
def laplacian_sp(input, out_type):
    """Reference Laplacian (window size 3, no smoothing) computed per sample
    with SciPy, using 'mirror' boundary handling to match DALI.

    Uses ``scipy.ndimage.laplace`` directly: the ``scipy.ndimage.filters``
    module (imported at the top of this file as ``sp_filters``) is a
    deprecated alias that recent SciPy releases remove.
    """
    from scipy.ndimage import laplace
    return [laplace(sample, output=out_type, mode='mirror') for sample in input]
def _test_vs_scipy(device, batch_size, num_dims, in_type, out_type):
    """Compare fn.laplacian (window size 3, smoothing disabled) against
    scipy.ndimage on randomly shaped 1D/2D/3D data."""
    shape = (30,) * num_dims
    # scipy supports only windows of size 3 and does not use smoothing
    window_size, smoothing_size = 3, 1
    data = RandomlyShapedDataIterator(
        batch_size, max_shape=shape, dtype=in_type)

    @pipeline_def
    def pipeline():
        if out_type == np.float32:
            dtype_args = {'dtype': types.FLOAT}
        else:
            dtype_args = {}
        input = fn.external_source(data)
        if device == "gpu":
            input = input.gpu()
        edges = fn.laplacian(input, window_size=window_size, device=device,
                             smoothing_size=smoothing_size, **dtype_args)
        return edges, input
    pipe = pipeline(
        device_id=0, num_threads=4, batch_size=batch_size)
    pipe.build()
    for _ in range(test_iters):
        edges, input = pipe.run()
        if device == "gpu":
            edges = edges.as_cpu()
            input = input.as_cpu()
        edges = to_batch(edges, batch_size)
        input = to_batch(input, batch_size)
        baseline = laplacian_sp(input, out_type)
        max_error = 1e-6
        check_batch(edges, baseline, batch_size, max_allowed_error=max_error)
def test_vs_scipy():
    """Compare window-size-3 Laplacian against scipy.ndimage for 1D/2D/3D inputs."""
    batch_size = 10
    # scipy wraps integer overflow instead of saturating, so uint8 inputs are skipped.
    for device in ("cpu", "gpu"):
        for num_dims in (1, 2, 3):
            for in_type in (np.int16, np.int32, np.int64, np.float32):
                out_types = [None] if in_type == np.float32 else [None, np.float32]
                for out_type in out_types:
                    yield _test_vs_scipy, device, batch_size, num_dims, in_type, out_type
def convert_sat(img, out_type):
    """Saturate-cast img to the integral type out_type (clip to its value range)."""
    limits = np.iinfo(out_type)
    clipped = np.clip(img, limits.min, limits.max)
    return clipped.astype(out_type)
def spread_values(out, axes):
    """Broadcast a parameter array to a per-axis list of length ``axes``.

    An empty array yields the default of 3 per axis, a single element is
    repeated, and exactly ``axes`` elements are used as-is.

    Raises AssertionError for any other length. The raise is explicit (rather
    than a bare ``assert False``) so the check still fires under ``python -O``,
    which strips assert statements.
    """
    out = out.reshape(-1)
    if len(out) == 0:
        return [3] * axes
    if len(out) == 1:
        return [out[0]] * axes
    if len(out) == axes:
        return [out[i] for i in range(axes)]
    raise AssertionError(
        f"Cannot spread {len(out)} values over {axes} axes")
def get_windows(window_sizes):
    """Map a per-derivative matrix of window sizes (as produced by
    get_window_sizes) to the corresponding 1D convolution windows.

    Diagonal entries use OpenCV's second-derivative window, off-diagonal
    entries the matching smoothing window; windows are cached by size.
    """
    axes = len(window_sizes)
    # Distinct sizes needed for derivative (diagonal) and smoothing (off-diagonal) windows.
    d_windows = {window_sizes[i][i]: None for i in range(axes)}
    s_windows = {window_sizes[i][j]: None for i in range(
        axes) for j in range(axes) if i != j}
    for window_size in d_windows:
        d, s = cv2.getDerivKernels(2, 0, ksize=window_size)
        d_windows[window_size] = d.reshape(-1)
        # getDerivKernels returns the smoothing window too - reuse it when needed.
        if window_size > 1 and window_size in s_windows and s_windows[window_size] is None:
            s_windows[window_size] = s.reshape(-1)
    for window_size in s_windows:
        if s_windows[window_size] is None:
            if window_size == 1:
                # Size-1 smoothing is the identity window.
                s_windows[window_size] = np.array([1.], dtype=np.float32)
            else:
                _, s = cv2.getDerivKernels(2, 0, ksize=window_size)
                s_windows[window_size] = s.reshape(-1)
    return [[
        (d_windows if i == j else s_windows)[window_sizes[i][j]]
        for j in range(axes)] for i in range(axes)]
def get_window_sizes(window_size, smoothing_size, axes):
    """Expand window/smoothing size parameters into a per-derivative matrix.

    Row i gives, for the i-th partial derivative, the 1D window size used along
    each axis: the derivative window on axis i, smoothing windows elsewhere.
    """
    deriv_sizes = spread_values(window_size, axes)
    if smoothing_size.reshape(-1).size == 0:
        # No smoothing sizes given: the derivative window size is used on all axes.
        return [[size] * axes for size in deriv_sizes]
    smooth_sizes = spread_values(smoothing_size, axes)
    rows = []
    for i in range(axes):
        rows.append([deriv_sizes[j] if j == i else smooth_sizes[j] for j in range(axes)])
    return rows
def laplacian_baseline(img, out_type, window_size, smoothing_size, scale, axes, skip_axes=0):
    """Numpy/scipy reference Laplacian: a sum over partial second derivatives,
    each computed as a separable sequence of 1D convolutions, scaled per
    derivative, then saturate-cast to out_type (float32 passes through).

    ``skip_axes`` is the number of leading frame/channel dims left unfiltered.
    """
    scales = spread_values(scale, axes)
    all_sizes = get_window_sizes(window_size, smoothing_size, axes)
    acc = np.zeros(img.shape, dtype=np.float32)
    img = np.float32(img)
    all_windows = get_windows(all_sizes)
    for windows, scale in zip(all_windows, scales):
        partial = img
        for i in reversed(range(axes)):
            axis = i + skip_axes
            # Degenerate (size-1) extents cannot be mirrored.
            if img.shape[axis] == 1:
                mode = "nearest"
            else:
                mode = "mirror"
            partial = convolve1d(partial, windows[i], axis, mode=mode)
        acc += scale * partial
    if out_type == np.float32:
        return acc
    else:
        return convert_sat(acc, out_type)
def count_skip_axes(layout):
    """Number of leading frame ('F') / channel ('C') dims to skip in layout."""
    if layout.startswith(("FC", "CF")):
        return 2
    if layout.startswith(("F", "C")):
        return 1
    return 0
@pipeline_def
def laplacian_per_sample_pipeline(device, iterator, layout, window_dim, smoothing_dim, axes,
                                  normalize, out_type):
    """Pipeline applying fn.laplacian with per-sample random window/smoothing
    sizes; returns (edges, data, window_size, smoothing_size, scale) so the
    caller can rebuild the same kernels on the host.

    window_dim/smoothing_dim select the argument form: None = operator
    default, 0 = scalar per sample, 1 = one size per axis.
    """
    data = fn.external_source(iterator, layout=layout)
    if window_dim is None:
        window_size = 3
        w_exponent = 0
        window_arg = None
    else:
        window_shape = [axes for _ in range(window_dim)]
        # Random odd sizes in [3, max_window_size].
        window_size = fn.random.uniform(
            range=[1, max_window_size // 2], shape=window_shape,
            dtype=types.INT32) * 2 + 1
        window_arg = window_size
        w_exponent = window_size - 3
    if smoothing_dim is None:
        smoothing_size = None
        s_exponent = (window_size - 1) * (axes - 1)
    else:
        smoothing_shape = [axes for _ in range(smoothing_dim)]
        # Random odd sizes in [1, max_window_size].
        smoothing_size = fn.random.uniform(
            range=[0, max_window_size // 2], shape=smoothing_shape,
            dtype=types.INT32) * 2 + 1
        if smoothing_dim == 1:
            # Per-axis smoothing: each partial derivative smooths all the other axes.
            s_exponent = fn.reductions.sum(
                smoothing_size, axes=0) - smoothing_size - axes + 1
        else:
            s_exponent = (smoothing_size - 1) * (axes - 1)
    exponent = w_exponent + s_exponent
    scale = 2.**(-exponent)
    kwargs = {'normalized_kernel': True} if normalize else {'scale': scale}
    if out_type == np.float32:
        kwargs['dtype'] = types.FLOAT
    if device == "gpu":
        data = data.gpu()
    edges = fn.laplacian(data, window_size=window_arg, device=device,
                         smoothing_size=smoothing_size, **kwargs)
    # Return empty arrays for defaulted args so the output set is always well-formed.
    if smoothing_size is None:
        smoothing_size = np.array([], dtype=np.int32)
    if window_arg is None:
        window_arg = np.array([], dtype=np.int32)
    return edges, data, window_arg, smoothing_size, scale
def check_per_sample_laplacian(device, batch_size, window_dim, smoothing_dim, normalize,
                               shape, layout, axes, in_type, out_type):
    """Compare fn.laplacian with per-sample window/smoothing sizes against the
    numpy/scipy baseline computed from the values actually drawn."""
    iterator = RandomlyShapedDataIterator(
        batch_size, max_shape=shape, dtype=in_type)
    pipe = laplacian_per_sample_pipeline(
        device_id=0, device=device, num_threads=4, batch_size=batch_size, seed=42,
        iterator=iterator, layout=layout, window_dim=window_dim, smoothing_dim=smoothing_dim,
        axes=axes, normalize=normalize, out_type=out_type)
    pipe.build()
    for _ in range(test_iters):
        edges, data, window_size, smoothing_size, scale = pipe.run()
        if device == "gpu":
            edges = edges.as_cpu()
            data = data.as_cpu()
        edges, data, window_size, smoothing_size, scale = [
            to_batch(out, batch_size)
            for out in (edges, data, window_size, smoothing_size, scale)]
        baseline = []
        for i in range(batch_size):
            skip_axes = count_skip_axes(layout)
            sample_baseline = laplacian_baseline(
                data[i], out_type or in_type, window_size[i], smoothing_size[i],
                scale[i], axes, skip_axes)
            baseline.append(sample_baseline)
        if out_type == np.float32:
            # Normalized abs values are up to 2 * `axes` * 255 so it still gives
            # over 5 decimal digits of precision
            max_error = 1e-3
        else:
            max_error = 1
        check_batch(edges, baseline, batch_size,
                    max_allowed_error=max_error, expected_layout=layout)
def test_per_sample_laplacian():
    """Per-sample (tensor) window and smoothing sizes, uint8 input."""
    batch_size = 10
    for device in ("cpu", "gpu"):
        for out_type in (None, np.float32):
            for shape, layout, axes in shape_layout_axes_cases:
                for normalize in (True, False):
                    yield (check_per_sample_laplacian, device, batch_size, 1, 1,
                           normalize, shape, layout, axes, np.uint8, out_type)
@attr('slow')
def slow_test_per_sample_laplacian():
    """Extended per-sample sweep over input types and window/smoothing argument shapes."""
    batch_size = 10
    for device in ("cpu", "gpu"):
        for in_type in (np.int16, np.int32, np.float32):
            for out_type in (None, np.float32):
                if out_type == in_type:
                    continue
                for shape, layout, axes in shape_layout_axes_cases:
                    # Only float32 input gets the full default/scalar/per-axis sweep.
                    full_test = [None, 0, 1]
                    for window_dim in full_test if in_type == np.float32 else [1]:
                        for smoothing_dim in full_test if in_type == np.float32 else [1]:
                            for normalize in (True, False):
                                yield (check_per_sample_laplacian, device, batch_size,
                                       window_dim, smoothing_dim, normalize, shape,
                                       layout, axes, in_type, out_type)
def check_fixed_param_laplacian(device, batch_size, in_type, out_type, shape, layout, axes,
                                window_size, smoothing_size, scales, normalize):
    """Compare fn.laplacian called with fixed (build-time) window sizes and
    scales against the numpy/scipy baseline."""
    iterator = RandomlyShapedDataIterator(
        batch_size, max_shape=shape, dtype=in_type)

    @pipeline_def
    def pipeline():
        data = fn.external_source(iterator, layout=layout)
        if out_type != np.float32:
            dtype_arg = {}
        else:
            dtype_arg = {"dtype": types.FLOAT}
        if device == "gpu":
            data = data.gpu()
        edges = fn.laplacian(data, window_size=window_size, smoothing_size=smoothing_size,
                             scale=scales, normalized_kernel=normalize, **dtype_arg)
        return edges, data
    pipe = pipeline(
        device_id=0, num_threads=4, batch_size=batch_size, seed=42)
    pipe.build()
    for _ in range(test_iters):
        edges, data = pipe.run()
        if device == "gpu":
            edges = edges.as_cpu()
            data = data.as_cpu()
        edges = to_batch(edges, batch_size)
        data = to_batch(data, batch_size)
        baseline = []
        for i in range(batch_size):
            skip_axes = count_skip_axes(layout)
            # NOTE(review): window_size/smoothing_size/scales are rebound here, so
            # later iterations reuse the converted arrays - idempotent, but it
            # shadows the function parameters.
            window_size = np.array([]) if window_size is None else np.array(
                window_size, dtype=np.int32)
            smoothing_size = np.array([]) if smoothing_size is None else np.array(
                smoothing_size, dtype=np.int32)
            if normalize:
                # Reconstruct the per-derivative scale the operator applied.
                all_sizes = get_window_sizes(window_size, smoothing_size, axes)
                scales = [2.**(-sum(sizes) + axes + 2) for sizes in all_sizes]
                scales = np.array(scales, dtype=np.float32)
            sample = laplacian_baseline(
                data[i], out_type or in_type, window_size, smoothing_size,
                scales, axes, skip_axes)
            baseline.append(sample)
        if out_type == np.float32:
            max_error = 1e-3
        else:
            max_error = 1
        check_batch(edges, baseline, batch_size,
                    max_allowed_error=max_error, expected_layout=layout)
@attr('slow')
def slow_test_fixed_params_laplacian():
    """Sweep fixed window-size/smoothing-size/scale combinations per data
    dimensionality against the baseline."""
    batch_size = 10
    # Per number of axes: operator default (None), scalars and per-axis lists.
    window_size_cases = {
        1: [None, 3, 5, 9, 21],
        2: [None, [3, 3], 11, [9, 5], [3, 17]],
        3: [None, [3, 5, 7], [3, 3, 3], 11, [23, 7, 11]],
    }
    smoothing_size_cases = {
        1: [None, 1, 3, 11, 21],
        2: [None, [1, 3], 1, 11, [9, 5]],
        3: [None, [3, 5, 7], 1, 11, [9, 7, 1]],
    }

    def window_scales(window_sizes, smoothing_sizes, axes):
        # Scale cases: per-derivative normalization factors (plus a scalar form
        # when they are all equal), each off by a constant factor.
        window_sizes = np.array([]) if window_sizes is None else np.array(window_sizes)
        smoothing_sizes = np.array([]) if smoothing_sizes is None else np.array(smoothing_sizes)
        all_sizes = get_window_sizes(window_sizes, smoothing_sizes, axes)
        scales = [2.**(-sum(sizes) + axes + 2) for sizes in all_sizes]
        cases = [scales]
        if all(scales[0] == s for s in scales):
            cases.append([scales[0]])
        return [[v * factor for v in case] for case in cases for factor in [1 / 16, 4.]]
    for device in ["cpu", "gpu"]:
        for in_type in [np.uint8, np.int32, np.int64, np.float32]:
            for out_type in [None, np.float32]:
                if in_type == out_type:
                    continue
                for shape, layout, axes in shape_layout_axes_cases:
                    for window_sizes in window_size_cases[axes]:
                        for smooth_sizes in smoothing_size_cases[axes]:
                            for normalize in [True, False]:
                                if normalize:
                                    # The op computes its own scale when normalizing.
                                    scale_cases = [None]
                                else:
                                    scale_cases = window_scales(window_sizes, smooth_sizes, axes)
                                for scales in scale_cases:
                                    yield check_fixed_param_laplacian, device, batch_size, \
                                        in_type, out_type, shape, layout, axes, \
                                        window_sizes, smooth_sizes, scales, normalize
def check_build_time_fail(device, batch_size, shape, layout, axes, window_size, smoothing_size,
                          scale, normalize, err_regex):
    """Expect a fixed-parameter Laplacian run (uint8 input, default output
    type) to raise RuntimeError matching err_regex."""
    with assert_raises(RuntimeError, regex=err_regex):
        check_fixed_param_laplacian(device, batch_size, np.uint8, None, shape, layout,
                                    axes, window_size, smoothing_size, scale, normalize)
def check_tensor_input_fail(device, batch_size, shape, layout, window_size, smoothing_size,
                            scale, normalize, dtype, err_regex):
    """Expect fn.laplacian with run-time (tensor) window/smoothing/scale
    arguments to raise RuntimeError matching err_regex."""
    iterator = RandomlyShapedDataIterator(
        batch_size, max_shape=shape, dtype=np.uint8)

    def gen_params():
        # The same (invalid) parameter values for every sample.
        return np.array(window_size, dtype=np.int32), np.array(smoothing_size, dtype=np.int32), \
            np.array(scale, dtype=np.float32)

    @pipeline_def
    def pipeline():
        data = fn.external_source(iterator, layout=layout)
        window_size, smoothing_size, scale = fn.external_source(
            gen_params, batch=False, num_outputs=3)
        if device == "gpu":
            data = data.gpu()
        edges = fn.laplacian(data, window_size=window_size, smoothing_size=smoothing_size,
                             scale=scale, normalized_kernel=normalize, dtype=dtype, device=device)
        return edges, data
    with assert_raises(RuntimeError, regex=err_regex):
        pipe = pipeline(device_id=0, num_threads=4, batch_size=batch_size)
        pipe.build()
        pipe.run()
def test_fail_laplacian():
    """Generate negative test cases for fn.laplacian.

    Yields checks that expect a RuntimeError with a specific message for
    invalid layouts, window/smoothing sizes, scales and output types.
    """
    # (shape, layout, axes, expected-error regex) - malformed layout cases.
    args = [
        ((20, 20, 30, 3), "DHCW", 3,
         "Only channel-first or channel-last layouts are supported, got: .*\\."),
        ((5, 20, 30, 3), "HFWC", 2,
         "For sequences, layout should begin with 'F' or 'C', got: .*\\."),
        ((5, 10, 10, 10, 7, 3), "FWXYZC", 4,
         "Too many dimensions, found: \\d+ data axes, maximum supported is: 3\\."),
        ((5, 3, 20, 3, 30), "FCHCW", 2,
         "Only channel-first or channel-last layouts are supported, got: .*\\."),
        ((5, 3, 20, 3, 30), "FCCHW", 2,
         "Found more the one occurrence of 'F' or 'C' axes in layout: .*\\."),
        ((5, 3), "CF", 2, "No spatial axes found in the layout"),
    ]
    for device in "cpu", "gpu":
        for shape, layout, axes, err_regex in args:
            yield check_build_time_fail, device, 10, shape, layout, axes, 11, 11, 1., \
                False, err_regex
        # Unsupported output dtype (must be input type, FLOAT, or unspecified).
        yield check_tensor_input_fail, device, 10, (10, 10, 3), "HWC", 11, 11, 1., False, \
            types.UINT16, "Output data type must be same as input, FLOAT or skipped"
        # scale and normalized_kernel=True are mutually exclusive.
        yield check_build_time_fail, device, 10, (10, 10, 3), "HWC", 2, 11, 11, 1., True, \
            "Parameter ``scale`` cannot be specified when ``normalized_kernel`` is set to True"
        # Derivative window sizes outside the supported odd range.
        for window_size in [-3, 10, max_window_size + 1]:
            yield check_build_time_fail, device, 10, (10, 10, 3), "HWC", 2, window_size, 5, 1., \
                False, "Window size must be an odd integer between 3 and \\d"
            yield check_tensor_input_fail, device, 10, (10, 10, 3), "HWC", window_size, 5, 1., \
                False, types.FLOAT, "Window size must be an odd integer between 3 and \\d"
        # Smoothing window sizes outside the supported odd range.
        for window_size in [[3, 6], -1, max_window_size + 1]:
            yield check_build_time_fail, device, 10, (10, 10, 3), "HWC", 2, 3, window_size, 1., \
                False, "Smoothing window size must be an odd integer between 1 and \\d"
        for window_size in [6, -1, max_window_size + 1]:
            yield check_tensor_input_fail, device, 10, (10, 10, 3), "HWC", 3, window_size, 1., \
                False, types.FLOAT, \
                "Smoothing window size must be an odd integer between 1 and \\d"
        # Wrong number of per-axis window sizes for a 2-axis input.
        for window_size in [[3, 7, 3], [7, 7, 7, 7, 7]]:
            yield check_build_time_fail, device, 10, (10, 10, 3), "HWC", 2, window_size, 11, 1., \
                False, (f"Argument \"window_size\" expects either a single value "
                        f"or a list of 2 elements. {len(window_size)} given")
            yield check_tensor_input_fail, device, 10, (10, 10, 3), "HWC", window_size, 11, 1., \
                False, types.FLOAT, (f"Argument window_size for sample 0 is expected to have "
                                     f"1 or 2 elements, got: {len(window_size)}")
        # Wrong number of per-axis scales for a 2-axis input.
        for scale in [[3, 7, 3], [7, 7, 7, 7, 7]]:
            yield check_build_time_fail, device, 10, (10, 10, 3), "HWC", 2, 3, 3, scale, False, \
                (f"Argument \"scale\" expects either a single value or a list "
                 f"of 2 elements. {len(scale)} given.")
            yield check_tensor_input_fail, device, 10, (10, 10, 3), "HWC", 5, 5, scale, False, \
                types.FLOAT, (f"Argument scale for sample 0 is expected to have "
                              f"1 or 2 elements, got: {len(scale)}")
def test_per_frame():
    """Check fn.laplacian with per-frame (video) tensor arguments."""

    def window_size(sample_desc):
        # Random odd window size (assumes rng.randint's upper bound is
        # exclusive, i.e. values in [3, 29] - TODO confirm rng type).
        return np.array(2 * sample_desc.rng.randint(1, 15) + 1, dtype=np.int32)

    def per_axis_window_size(sample_desc):
        return np.array([window_size(sample_desc) for _ in range(2)])

    def per_axis_smoothing_size(sample_desc):
        # Random odd smoothing size per axis.
        return np.array([2 * sample_desc.rng.randint(0, 15) + 1 for _ in range(2)], dtype=np.int32)

    def per_axis_scale(sample_desc):
        def scale(sample_desc):
            # Random negative power of two.
            k = 2 * sample_desc.rng.randint(0, 15) + 1
            return np.array(2. ** -k, dtype=np.float32)
        return np.array([scale(sample_desc) for _ in range(2)])

    # Each case: (operator, fixed kwargs, per-frame argument callbacks).
    video_test_cases = [
        (fn.laplacian, {}, []),
        (fn.laplacian, {}, [ArgCb("window_size", window_size, True)]),
        (fn.laplacian, {}, [ArgCb("window_size", per_axis_window_size, True)]),
        (fn.laplacian, {'dtype': types.FLOAT},
         [ArgCb("scale", per_axis_scale, True)]),
        (fn.laplacian, {}, [
            ArgCb("window_size", per_axis_window_size, True),
            ArgCb("smoothing_size", per_axis_smoothing_size, True)]),
        (fn.laplacian, {}, [
            ArgCb("window_size", per_axis_window_size, True),
            ArgCb("smoothing_size", per_axis_smoothing_size, True),
            ArgCb("scale", per_axis_scale, True)]),
    ]
    yield from video_suite_helper(video_test_cases, expand_channels=True)
|
DALI-main
|
dali/test/python/operator_1/test_laplacian.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.math as math
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali import Pipeline, pipeline_def
from nvidia.dali.backend_impl import TensorListGPU
from nose_utils import assert_raises
from test_utils import RandomlyShapedDataIterator, \
generator_random_axes_for_3d_input, \
generator_random_data, \
as_array
class PadSynthDataPipeline(Pipeline):
    """Pipeline feeding synthetic iterator data through the Pad operator.

    Returns both the raw input and the padded output so callers can compare
    shapes and contents.
    """

    def __init__(self, device, batch_size, iterator, layout="HWC", num_threads=1, device_id=0,
                 num_gpus=1, axes=(), axis_names="", align=(), shape_arg=()):
        super().__init__(batch_size, num_threads, device_id, seed=1234)
        self.device = device        # "cpu" or "gpu"
        self.layout = layout
        self.iterator = iterator    # source of input batches
        self.inputs = ops.ExternalSource()
        self.pad = ops.Pad(device=self.device, axes=axes, axis_names=axis_names, align=align,
                           shape=shape_arg)

    def define_graph(self):
        self.data = self.inputs()
        input_data = self.data
        # Move to GPU first when testing the GPU backend.
        data = input_data.gpu() if self.device == 'gpu' else input_data
        out = self.pad(data)
        return input_data, out

    def iter_setup(self):
        data = self.iterator.next()
        self.feed_input(self.data, data, layout=self.layout)
def check_pad(device, batch_size, input_max_shape, axes, axis_names, align, shape_arg):
    """Run the Pad pipeline and verify output shapes against expectations.

    For each padded axis the expected extent is either the requested
    ``shape_arg`` value (if non-negative, never cropping below the input
    extent) or the batch-wide maximum extent, rounded up to ``align``.
    """
    eii = RandomlyShapedDataIterator(batch_size, max_shape=input_max_shape)
    layout = "HWC"
    pipe = PadSynthDataPipeline(device, batch_size, iter(eii), axes=axes, axis_names=axis_names,
                                align=align, shape_arg=shape_arg, layout=layout)
    pipe.build()

    # Translate axis names into axis indices based on the layout.
    if axis_names:
        axes = []
        for axis_name in axis_names:
            axis_idx = layout.find(axis_name)
            assert axis_idx >= 0
            axes.append(axis_idx)

    # Empty axes means "pad all axes".
    actual_axes = axes if (axes and len(axes) > 0) else range(len(input_max_shape))
    assert len(actual_axes) > 0

    # Normalize shape_arg and align to one value per padded axis.
    if not shape_arg or len(shape_arg) == 0:
        shape_arg = [-1] * len(actual_axes)
    assert len(shape_arg) == len(actual_axes)

    if not align or len(align) == 0:
        align = [1] * len(actual_axes)
    elif len(align) == 1 and len(actual_axes) > 1:
        align = [align[0] for _ in actual_axes]
    assert len(align) == len(actual_axes)

    for _ in range(5):
        out1, out2 = pipe.run()
        out1_data = out1.as_cpu() if isinstance(out1[0], dali.backend_impl.TensorGPU) else out1

        # Batch-wide maximum extent per padded axis.
        # Fix: the original shadowed the axis-loop index with the sample-loop
        # index (both named ``i``) and computed unused align/shape values here.
        max_shape = [-1] * len(input_max_shape)
        for dim in actual_axes:
            for sample_idx in range(batch_size):
                input_shape = out1_data.at(sample_idx).shape
                if input_shape[dim] > max_shape[dim]:
                    max_shape[dim] = input_shape[dim]

        out2_data = out2.as_cpu() if isinstance(out2[0], dali.backend_impl.TensorGPU) else out2
        for i in range(batch_size):
            input_shape = out1_data.at(i).shape
            output_shape = out2_data.at(i).shape
            for j in range(len(actual_axes)):
                dim = actual_axes[j]
                align_val = align[j]
                shape_arg_val = shape_arg[j]
                if shape_arg_val >= 0:
                    # Explicit shape: pad up to it, but never crop the input.
                    in_extent = input_shape[dim]
                    expected_extent = in_extent if in_extent > shape_arg_val else shape_arg_val
                else:
                    # No explicit shape: pad to the batch maximum.
                    expected_extent = max_shape[dim]
                # Round up to the requested alignment.
                remainder = expected_extent % align_val
                if remainder > 0:
                    expected_extent = expected_extent - remainder + align_val
                assert output_shape[dim] == expected_extent
def test_pad():
    """Grid of pad configurations: axes vs axis_names, align, explicit shape."""
    for device in ["cpu", "gpu"]:
        for batch_size in {1, 8}:
            # Each tuple: (input_max_shape, axes, axis_names, align, shape_arg).
            for input_max_shape, axes, axis_names, align, shape_arg in [
                    ((200, 400, 3), (0,), None, None, None),
                    ((200, 400, 3), None, "H", None, None),
                    ((200, 400, 3), (1,), None, None, None),
                    ((200, 400, 3), None, "W", None, None),
                    ((200, 400, 3), (0, 1), None, None, None),
                    ((200, 400, 3), None, "HW", None, None),
                    ((200, 400, 3), (), None, None, None),
                    ((200, 400, 3), [], None, None, None),
                    ((200, 400, 3), None, "", None, None),
                    ((200, 400, 3), (2,), None, (4,), None),
                    ((200, 400, 3), None, "C", (4,), None),
                    ((200, 400, 3), (0, 1), None, (256, 256), None),
                    ((200, 400, 3), None, "HW", (256, 256), None),
                    ((200, 400, 3), (0, 1), None, (16, 64), None),
                    ((200, 400, 3), None, "HW", (16, 64), None),
                    ((200, 400, 3), (0, 1), None, (256,), None),
                    ((200, 400, 3), None, "HW", (256,), None),
                    ((200, 400, 3), None, None, None, (-1, -1, 4)),
                    ((25, 100, 3), (0,), None, None, (25,)),
                    ((200, 400, 3), (0, 1), None, (4, 16), (1, 200))
            ]:
                yield check_pad, device, batch_size, input_max_shape, axes, axis_names, align, \
                    shape_arg
def test_pad_error():
    """A non-positive ``align`` value must raise at run time."""
    batch_size = 2
    data_iter = RandomlyShapedDataIterator(batch_size, max_shape=(5, 5, 3))
    pipe = PadSynthDataPipeline("cpu", batch_size, iter(data_iter), axes=None, axis_names="H",
                                align=0, shape_arg=None, layout="HWC")
    pipe.build()
    with assert_raises(RuntimeError, glob='Values of `align` argument must be positive.'):
        pipe.run()
def is_aligned(sh, align, axes):
    """Return True iff ``sh[axis]`` is divisible by ``align[i]`` for every listed axis."""
    assert len(sh) == len(align)
    return all(sh[axis] % align[i] == 0 for i, axis in enumerate(axes))
def check_pad_per_sample_shapes_and_alignment(device='cpu', batch_size=3, ndim=2, num_iter=3):
    """Verify fn.pad with per-sample ``shape`` and ``align`` tensor arguments."""
    pipe = Pipeline(batch_size=batch_size, num_threads=3, device_id=0, seed=1234)
    axes = (0, 1)
    with pipe:
        # Random input extents in [10, 20); requested shapes in [21, 30), so
        # the request always exceeds the input; alignment in [3, 5).
        in_shape = fn.cast(fn.random.uniform(range=(10, 20), shape=(ndim,)), dtype=types.INT32)
        in_data = fn.random.uniform(range=(0., 1.), shape=in_shape)
        if device == 'gpu':
            in_data = in_data.gpu()
        req_shape = fn.cast(fn.random.uniform(range=(21, 30), shape=(ndim,)), dtype=types.INT32)
        req_align = fn.cast(fn.random.uniform(range=(3, 5), shape=(ndim,)), dtype=types.INT32)
        # Three variants: explicit shape only, alignment only, and both.
        out_pad_shape = fn.pad(in_data, axes=axes, align=None, shape=req_shape)
        out_pad_align = fn.pad(in_data, axes=axes, align=req_align, shape=None)
        out_pad_both = fn.pad(in_data, axes=axes, align=req_align, shape=req_shape)
        pipe.set_outputs(in_shape, in_data, req_shape, req_align, out_pad_shape, out_pad_align,
                         out_pad_both)
    pipe.build()
    for _ in range(num_iter):
        outs = [out.as_cpu() if isinstance(out, TensorListGPU) else out for out in pipe.run()]
        for i in range(batch_size):
            in_shape, in_data, req_shape, req_align, out_pad_shape, out_pad_align, out_pad_both = \
                [outs[out_idx].at(i) for out_idx in range(len(outs))]
            assert (in_shape == in_data.shape).all()
            # Pad to explicit shape
            assert (out_pad_shape.shape >= in_shape).all()
            assert (req_shape == out_pad_shape.shape).all()
            # Alignment only
            assert (out_pad_align.shape >= in_shape).all()
            assert is_aligned(out_pad_align.shape, req_align, axes)
            # Explicit shape + alignment
            assert (out_pad_both.shape >= in_shape).all()
            assert (req_shape <= out_pad_both.shape).all()
            assert is_aligned(out_pad_both.shape, req_align, axes)
def test_pad_per_sample_shapes_and_alignment():
    """Run the per-sample shape/alignment check on both backends."""
    for dev in ('cpu', 'gpu'):
        yield check_pad_per_sample_shapes_and_alignment, dev
def check_pad_to_square(device='cpu', batch_size=3, ndim=2, num_iter=3):
    """Pad each sample to a square whose side is max(H, W) of that sample."""
    pipe = Pipeline(batch_size=batch_size, num_threads=3, device_id=0, seed=1234)
    with pipe:
        in_shape = fn.cast(fn.random.uniform(range=(10, 20), shape=(ndim,)), dtype=types.INT32)
        in_data = fn.reshape(fn.random.uniform(range=(0., 1.), shape=in_shape), layout="HW")
        # Compute side = max(H, W) at run time from the data's shape.
        shape = fn.shapes(in_data, dtype=types.INT32)
        h = fn.slice(shape, 0, 1, axes=[0])
        w = fn.slice(shape, 1, 1, axes=[0])
        side = math.max(h, w)
        if device == 'gpu':
            in_data = in_data.gpu()
        # Request a (side, side) output shape.
        out_data = fn.pad(in_data, axis_names="HW", shape=fn.cat(side, side, axis=0))
        pipe.set_outputs(in_data, out_data)
    pipe.build()
    for _ in range(num_iter):
        outs = [out.as_cpu() if isinstance(out, TensorListGPU) else out for out in pipe.run()]
        for i in range(batch_size):
            in_data, out_data = \
                [outs[out_idx].at(i) for out_idx in range(len(outs))]
            in_shape = in_data.shape
            max_side = max(in_shape)
            # The output must be square with the longer input side...
            for s in out_data.shape:
                assert s == max_side
            # ...contain the original data, and be zero-padded elsewhere.
            np.testing.assert_equal(out_data[:in_shape[0], :in_shape[1]], in_data)
            np.testing.assert_equal(out_data[in_shape[0]:, :], 0)
            np.testing.assert_equal(out_data[:, in_shape[1]:], 0)
def test_pad_to_square():
    """Run the pad-to-square check on both backends."""
    for dev in ('cpu', 'gpu'):
        yield check_pad_to_square, dev
def check_pad_dynamic_axes(device, batch_size, num_threads, use_negative, use_empty):
    """Verify fn.pad with axes provided as a run-time tensor argument.

    ``pad1`` pads the selected axes to the batch maximum; ``pad2`` also gets
    an explicit per-axis shape (non-positive entries fall back to the batch
    maximum).
    """
    shape_arg_desc = (100, 120, np.int32)
    get_dynamic_axes = generator_random_axes_for_3d_input(
        batch_size, use_negative=use_negative, use_empty=use_empty,
        extra_out_desc=[shape_arg_desc])
    image_gen = generator_random_data(
        batch_size, min_sh=(10, 10, 3), max_sh=(100, 100, 3),
        dtype=np.float32, val_range=[0.0, 1.0])

    @pipeline_def(batch_size=batch_size, num_threads=num_threads, device_id=0)
    def make_pipe():
        image = fn.external_source(source=image_gen)
        if device == 'gpu':
            image = image.gpu()
        axes, shape = fn.external_source(source=get_dynamic_axes, num_outputs=2)
        fill_value = fn.random.uniform(device='cpu', range=[0.0, 255.0])
        pad1 = fn.pad(image, axes=axes, fill_value=fill_value)
        pad2 = fn.pad(image, axes=axes, shape=shape, fill_value=fill_value)
        return image, axes, shape, pad1, pad2, fill_value

    pipe = make_pipe()
    pipe.build()
    ndim = 3
    for _ in range(3):
        outs = pipe.run()
        # Batch-wide maximum input extent per dimension.
        max_shape = ndim * [-1]
        for sample_idx in range(batch_size):
            in_img_sh = as_array(outs[0][sample_idx]).shape
            for dim in range(ndim):
                if in_img_sh[dim] > max_shape[dim]:
                    max_shape[dim] = in_img_sh[dim]
        for sample_idx in range(batch_size):
            in_img = as_array(outs[0][sample_idx])
            axes = as_array(outs[1][sample_idx])
            naxes = axes.shape[0]
            if naxes == 0:  # Empty axes mean "all axes"
                axes = np.array(range(ndim), dtype=np.int32)
            shape = as_array(outs[2][sample_idx])
            pad1 = as_array(outs[3][sample_idx])
            pad2 = as_array(outs[4][sample_idx])
            fill_value = as_array(outs[5][sample_idx])
            in_sh = in_img.shape
            # pad1: each padded axis reaches the batch maximum.
            expected_pad1_sh = np.copy(pad1.shape)
            for d in axes:
                expected_pad1_sh[d] = max_shape[d]
            np.testing.assert_allclose(expected_pad1_sh, pad1.shape)
            np.testing.assert_allclose(pad1[:in_sh[0], :in_sh[1], :in_sh[2]], in_img)
            # NOTE(review): ':in_sh[1]:' is just ':in_sh[1]' (empty step), so
            # only the region past dim 0 is checked for the fill value;
            # padding along dims 1/2 alone is not covered here - confirm
            # whether that is intentional.
            np.testing.assert_allclose(pad1[in_sh[0]:, :in_sh[1]:, :in_sh[2]:], fill_value)
            # pad2: explicit extents win when positive, else batch maximum.
            expected_pad2_sh = np.copy(pad2.shape)
            for d, req_extent in zip(axes, shape):
                expected_pad2_sh[d] = req_extent if req_extent > 0 else max_shape[d]
            np.testing.assert_allclose(expected_pad2_sh, pad2.shape)
            np.testing.assert_allclose(pad2[:in_sh[0], :in_sh[1], :in_sh[2]], in_img)
            np.testing.assert_allclose(pad2[in_sh[0]:, :in_sh[1]:, :in_sh[2]:], fill_value)
def test_dynamic_axes():
    """Tensor axes, non-negative indices, non-empty axes."""
    batch_size, num_threads = 10, 3
    for dev in ('cpu', 'gpu'):
        yield check_pad_dynamic_axes, dev, batch_size, num_threads, False, False
def test_negative_axes():
    """Tensor axes including negative indices."""
    batch_size, num_threads = 10, 3
    for dev in ('cpu', 'gpu'):
        yield check_pad_dynamic_axes, dev, batch_size, num_threads, True, False
def test_empty_axes():
    """Tensor axes that may be empty (meaning: pad all axes)."""
    batch_size, num_threads = 10, 3
    for dev in ('cpu', 'gpu'):
        yield check_pad_dynamic_axes, dev, batch_size, num_threads, False, True
def check_pad_wrong_axes(device, wrong_axes_range=None):
    """fn.pad with out-of-range axis indices must raise a RuntimeError."""
    @pipeline_def(batch_size=1, num_threads=1, device_id=0)
    def make_pipe():
        fake_data = fn.constant(idata=0, shape=[10, 10, 3], dtype=types.FLOAT, device=device)
        # Axis values drawn from a range entirely outside [-3, 2].
        axes = fn.random.uniform(range=wrong_axes_range, shape=(2,), dtype=types.INT32)
        padded = fn.pad(fake_data, axes=axes)
        return padded
    p = make_pipe()
    p.build()
    # Note: [[] and []] are '[' and ']' characters.
    assert_raises(RuntimeError, p.run,
                  glob='Axis * out of range. Expected range is [[]-3, 2[]] for a 3D input')
def test_wrong_axes():
    """Axes below -3 or above 2 for a 3D input must fail."""
    bad_ranges = ((-10, -4), (3, 10))
    for dev in ('cpu', 'gpu'):
        for bad_range in bad_ranges:
            yield check_pad_wrong_axes, dev, bad_range
|
DALI-main
|
dali/test/python/operator_1/test_pad.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali as dali
import nvidia.dali.fn as fn
import cv2
import numpy as np
import multiprocessing as mp
import test_utils
from nose2.tools import params
NUM_THREADS = mp.cpu_count()
DEV_ID = 0
SEED = 1313
def cv2_median_blur(dst, img, ksize, layout):
    """Apply OpenCV median blur into the preallocated ``dst`` array.

    Channel-last images are filtered in one call; channel-first images are
    filtered plane by plane.
    """
    if layout[-1] == 'C':
        cv2.medianBlur(img, ksize=ksize, dst=dst)
    else:
        # Channel-first: blur each channel plane separately.
        for c in range(img.shape[0]):
            cv2.medianBlur(img[c, :, :], ksize, dst=dst[c, :, :])
def ref_func(img, ksize, layout):
    """OpenCV-based median-blur reference; handles an optional frame dim 'F'."""
    window = ksize[0]
    out = np.zeros_like(img)
    if layout.startswith('F'):
        # Video: blur every frame independently.
        for frame in range(img.shape[0]):
            cv2_median_blur(out[frame], img[frame], window, layout)
    else:
        cv2_median_blur(out, img, window, layout)
    return out
@dali.pipeline_def(num_threads=NUM_THREADS, device_id=DEV_ID,
                   exec_pipelined=False, exec_async=False)
def reference_pipe(data_src, layout, ksize_src):
    """CPU reference pipeline: median blur computed by OpenCV via python_function.

    NOTE(review): exec_pipelined/exec_async are disabled above, presumably a
    requirement of fn.python_function - confirm against DALI docs.
    """
    img = fn.external_source(source=data_src, batch=True, layout=layout)
    ksize = fn.external_source(source=ksize_src)
    return fn.python_function(img, ksize, function=lambda im, ks: ref_func(im, ks, layout=layout),
                              batch_processing=False)
@dali.pipeline_def(num_threads=NUM_THREADS, device_id=DEV_ID)
def median_blur_pipe(data_src, layout, ksize_src):
    """Pipeline under test: fn.experimental.median_blur with a tensor window size."""
    img = fn.external_source(source=data_src, batch=True, layout=layout, device='gpu')
    ksize = fn.external_source(source=ksize_src)
    # Duplicate the scalar ksize into a 2-element (height, width) pair.
    ksize = fn.cat(ksize, ksize)
    return fn.experimental.median_blur(img, window_size=ksize)
@dali.pipeline_def(num_threads=NUM_THREADS, device_id=DEV_ID)
def median_blur_cksize_pipe(data_src, layout, ksize):
    """Pipeline under test: median_blur with a constant (build-time) window size."""
    img = fn.external_source(source=data_src, batch=True, layout=layout, device='gpu')
    return fn.experimental.median_blur(img, window_size=ksize)
# OpenCV requires ksize to be odd and greater than 1
def ksize_src(bs, lo, hi, seed):
    """Endlessly yield batches of ``bs`` random odd window sizes within [lo, hi]."""
    rng = np.random.default_rng(seed=seed)

    def draw_one():
        # Draw in halved range, then map to an odd value.
        return rng.integers(lo // 2, hi // 2 + 1, size=(1), dtype=np.int32) * 2 + 1

    while True:
        yield [draw_one() for _ in range(bs)]
@params((32, 'HWC', np.uint8, 3, 9),
        (32, 'CHW', np.float32, 4, 5),
        (32, 'HWC', np.uint16, 1, 5),
        (4, 'FHWC', np.float32, 3, 5),
        (4, 'FCHW', np.uint8, 1, 9))
def test_median_blur_vs_ocv(bs, layout, dtype, channels, max_ksize):
    """Compare fn.experimental.median_blur against the OpenCV reference.

    Both pipelines consume identically-seeded data and ksize sources, so
    they see the same inputs.
    """
    cdim = layout.find('C')
    min_shape = [64 for c in layout]
    min_shape[cdim] = channels
    max_shape = [256 for c in layout]
    max_shape[cdim] = channels
    if layout[0] == 'F':
        # Video: use a smaller frame count (8..32) for the 'F' dimension.
        min_shape[0] = 8
        max_shape[0] = 32
    data1 = test_utils.RandomlyShapedDataIterator(batch_size=bs, min_shape=min_shape,
                                                  max_shape=max_shape, dtype=dtype, seed=SEED)
    data2 = test_utils.RandomlyShapedDataIterator(batch_size=bs, min_shape=min_shape,
                                                  max_shape=max_shape, dtype=dtype, seed=SEED)
    ksize1 = ksize_src(bs, 3, max_ksize, SEED)
    ksize2 = ksize_src(bs, 3, max_ksize, SEED)
    pipe1 = median_blur_pipe(data_src=data1, layout=layout, ksize_src=ksize1,
                             batch_size=bs, prefetch_queue_depth=1)
    pipe2 = reference_pipe(data_src=data2, layout=layout, ksize_src=ksize2, batch_size=bs)
    test_utils.compare_pipelines(pipe1, pipe2, batch_size=bs, N_iterations=10)
@params((32, 'HWC', np.uint8, 3, (7, 7)),
        (32, 'CHW', np.float32, 4, 3),
        (4, 'FCHW', np.uint8, 1, (9, 9)))
def test_median_blur_const_ksize_vs_ocv(bs, layout, dtype, channels, ksize):
    """Compare median_blur with a constant window size against OpenCV."""
    cdim = layout.find('C')
    min_shape = [64 for c in layout]
    min_shape[cdim] = channels
    max_shape = [256 for c in layout]
    max_shape[cdim] = channels
    if layout[0] == 'F':
        # Video: use a smaller frame count (8..32) for the 'F' dimension.
        min_shape[0] = 8
        max_shape[0] = 32
    data1 = test_utils.RandomlyShapedDataIterator(batch_size=bs, min_shape=min_shape,
                                                  max_shape=max_shape, dtype=dtype, seed=SEED)
    data2 = test_utils.RandomlyShapedDataIterator(batch_size=bs, min_shape=min_shape,
                                                  max_shape=max_shape, dtype=dtype, seed=SEED)
    if isinstance(ksize, tuple):
        cv_ksize = ksize[0]
    else:
        cv_ksize = ksize
    # lo == hi, so the reference source always yields the constant cv_ksize.
    ksize1 = ksize_src(bs, cv_ksize, cv_ksize, SEED)
    pipe1 = median_blur_cksize_pipe(data_src=data1, layout=layout, ksize=ksize,
                                    batch_size=bs, prefetch_queue_depth=1)
    pipe2 = reference_pipe(data_src=data2, layout=layout, ksize_src=ksize1, batch_size=bs)
    test_utils.compare_pipelines(pipe1, pipe2, batch_size=bs, N_iterations=10)
|
DALI-main
|
dali/test/python/operator_1/test_median_blur.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import unittest
import numpy as np
from nvidia.dali import pipeline_def, fn, types
from test_utils import get_dali_extra_path
from nose_utils import assert_raises
from nose2.tools import params
from debayer_test_utils import bayer_patterns, blue_position, blue_position2pattern, rgb2bayer, \
rgb2bayer_seq, debayer_bilinear_npp_pattern, debayer_bilinear_npp_pattern_seq
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
vid_dir = os.path.join(data_root, "db", "video", "sintel", "video_files")
vid_files = ["sintel_trailer-720p_3.mp4"]
def read_imgs(num_imgs, dtype, seed):
    """Decode ``num_imgs`` randomly shuffled JPEGs and return numpy arrays."""
    @pipeline_def
    def pipeline():
        input, _ = fn.readers.file(file_root=images_dir, random_shuffle=True, seed=seed)
        return fn.decoders.image(input, device="cpu", output_type=types.RGB)
    pipe = pipeline(batch_size=num_imgs, device_id=0, num_threads=4)
    pipe.build()
    (batch, ) = pipe.run()
    return [np.array(img, dtype=dtype) for img in batch]
def read_video(num_sequences, num_frames, height, width, seed=42):
    """Read, crop (fixed ROI) and resize video sequences; return numpy arrays."""
    roi_start = (90, 0)
    roi_end = (630, 1280)
    vid_filenames = [os.path.join(vid_dir, vid_file) for vid_file in vid_files]

    @pipeline_def
    def pipeline():
        video = fn.readers.video_resize(filenames=vid_filenames, name='video reader',
                                        sequence_length=num_frames,
                                        file_list_include_preceding_frame=True, device='gpu',
                                        roi_start=roi_start, roi_end=roi_end, seed=seed,
                                        resize_x=width, resize_y=height)
        return video
    pipe = pipeline(batch_size=num_sequences, device_id=0, num_threads=4)
    pipe.build()
    (batch, ) = pipe.run()
    return [np.array(seq) for seq in batch.as_cpu()]
def prepare_test_imgs(num_samples, dtype):
    """Build bayered inputs and NPP-bilinear debayered baselines.

    Returns two dicts keyed by bayer pattern: the bayered images and the
    expected debayered outputs.
    """
    assert dtype in (np.uint8, np.uint16)
    rng = np.random.default_rng(seed=101)
    imgs = read_imgs(num_samples, dtype, seed=42 if dtype == np.uint8 else 13)
    if dtype == np.uint16:
        # Widen 8-bit data to 16 bits, filling the low byte with random noise.
        imgs = [
            np.uint16(img) * 256 + np.uint16(rng.uniform(0, 256, size=img.shape)) for img in imgs
        ]
    bayered_imgs = {
        pattern: [rgb2bayer(img, pattern) for img in imgs]
        for pattern in bayer_patterns
    }
    npp_baseline = {
        pattern: [debayer_bilinear_npp_pattern(img, pattern) for img in imgs]
        for pattern, imgs in bayered_imgs.items()
    }
    return bayered_imgs, npp_baseline
class DebayerTest(unittest.TestCase):
    """Compare fn.experimental.debayer against the NPP bilinear baseline."""

    @classmethod
    def setUpClass(cls):
        # Prepare 8-bit and 16-bit test data once for all test methods.
        cls.num_samples = 7
        cls.bayered_imgs, cls.npp_baseline = prepare_test_imgs(cls.num_samples, dtype=np.uint8)
        cls.bayered_imgs16t, cls.npp_baseline16t = prepare_test_imgs(cls.num_samples,
                                                                     dtype=np.uint16)

    @classmethod
    def get_test_data(cls, dtype):
        """Return the (bayered images, baseline) dicts for the given dtype."""
        assert dtype in (np.uint8, np.uint16)
        if dtype == np.uint8:
            return cls.bayered_imgs, cls.npp_baseline
        else:
            return cls.bayered_imgs16t, cls.npp_baseline16t

    @params(*enumerate(itertools.product([1, 64], bayer_patterns)))
    def test_debayer_fixed_pattern(self, i, args):
        """Debayer with a pattern fixed for the whole pipeline."""
        (batch_size, pattern) = args
        num_iterations = 3
        # Every odd case feeds HWC input with an explicit single channel.
        test_hwc_single_channel_input = i % 2 == 1
        bayered_imgs, npp_baseline = self.get_test_data(np.uint8)

        def source(sample_info):
            idx = sample_info.idx_in_epoch % self.num_samples
            img = bayered_imgs[pattern][idx]
            assert len(img.shape) == 2
            if test_hwc_single_channel_input:
                h, w = img.shape
                img = img.reshape(h, w, 1)
            return img, np.array(idx, dtype=np.int32)

        @pipeline_def
        def debayer_pipeline():
            bayer_imgs, idxs = fn.external_source(source=source, batch=False, num_outputs=2)
            debayered_imgs = fn.experimental.debayer(bayer_imgs.gpu(),
                                                     blue_position=blue_position(pattern))
            return debayered_imgs, idxs

        pipe = debayer_pipeline(batch_size=batch_size, device_id=0, num_threads=4)
        pipe.build()
        out_batches = []
        for _ in range(num_iterations):
            debayered_imgs_dev, idxs = pipe.run()
            assert debayered_imgs_dev.layout() == "HWC"
            out_batches.append(
                ([np.array(img)
                  for img in debayered_imgs_dev.as_cpu()], [np.array(idx) for idx in idxs]))
        # Each output must match the NPP baseline of the same sample index.
        for debayered_imgs, idxs in out_batches:
            assert len(debayered_imgs) == len(idxs)
            for img_debayered, idx in zip(debayered_imgs, idxs):
                baseline = npp_baseline[pattern][idx]
                assert np.all(img_debayered == baseline)

    @params(*itertools.product([1, 11, 184], [np.uint8, np.uint16]))
    def test_debayer_per_sample_pattern(self, batch_size, dtype):
        """Debayer with a randomly chosen bayer pattern per sample."""
        num_iterations = 3
        num_patterns = len(bayer_patterns)
        rng = np.random.default_rng(seed=42 + batch_size)
        bayered_imgs, npp_baseline = self.get_test_data(dtype)

        def source(sample_info):
            idx = sample_info.idx_in_epoch % self.num_samples
            pattern_idx = np.int32(rng.uniform(0, num_patterns))
            pattern = bayer_patterns[pattern_idx]
            # Returns the bayered image, its blue position, and the sample idx.
            return bayered_imgs[pattern][idx], \
                np.array(blue_position(pattern), dtype=np.int32), \
                np.array(idx, dtype=np.int32)

        @pipeline_def
        def debayer_pipeline():
            bayer_imgs, blue_poses, idxs = fn.external_source(source=source, batch=False,
                                                              num_outputs=3)
            debayered_imgs = fn.experimental.debayer(bayer_imgs.gpu(), blue_position=blue_poses)
            return debayered_imgs, blue_poses, idxs

        pipe = debayer_pipeline(batch_size=batch_size, device_id=0, num_threads=4)
        pipe.build()
        out_batches = []
        for _ in range(num_iterations):
            debayered_imgs_dev, blue_poses, idxs = pipe.run()
            assert debayered_imgs_dev.layout() == "HWC"
            # Recover the pattern from the blue position that was used.
            out_batches.append(
                ([np.array(img) for img in debayered_imgs_dev.as_cpu()],
                 [blue_position2pattern(np.array(blue_pos))
                  for blue_pos in blue_poses], [np.array(idx) for idx in idxs]))
        for debayered_imgs, patterns, idxs in out_batches:
            assert len(debayered_imgs) == len(patterns) == len(idxs)
            for img_debayered, pattern, idx in zip(debayered_imgs, patterns, idxs):
                baseline = npp_baseline[pattern][idx]
                assert np.all(img_debayered == baseline)
class DebayerVideoTest(unittest.TestCase):
    """Debayer video sequences with a per-frame bayer pattern."""

    @classmethod
    def setUpClass(cls):
        rng = np.random.default_rng(seed=3)
        num_smaller, num_bigger = 4, 3
        cls.num_samples = num_smaller + num_bigger
        # Mix sequences of two different sizes, shuffled.
        smaller = read_video(num_smaller, 60, 108, 192)
        bigger = read_video(num_bigger, 32, 216, 384)
        video = smaller + bigger
        rng.shuffle(video)
        # Pick a random pattern for every frame of every sequence.
        patterns = [rng.choice(bayer_patterns, len(vid)) for vid in video]
        cls.blue_poses = [
            np.array([blue_position(pattern) for pattern in sample_patterns], dtype=np.int32)
            for sample_patterns in patterns
        ]
        cls.bayered_vid = [
            rgb2bayer_seq(vid, vid_patterns) for vid, vid_patterns in zip(video, patterns)
        ]
        cls.npp_baseline = [
            debayer_bilinear_npp_pattern_seq(vid, vid_patterns)
            for vid, vid_patterns in zip(cls.bayered_vid, patterns)
        ]

    def test_debayer_vid_per_frame_pattern(self):
        """Per-frame blue_position via fn.per_frame must match the baseline."""
        num_iterations = 2
        batch_size = (self.num_samples + 1) // 2

        def source(sample_info):
            idx = sample_info.idx_in_epoch % self.num_samples
            vid = self.bayered_vid[idx]
            return vid, self.blue_poses[idx], np.array(idx, dtype=np.int32)

        @pipeline_def
        def debayer_pipeline():
            bayered_vid, blue_positions, idxs = fn.external_source(source=source, batch=False,
                                                                   num_outputs=3,
                                                                   layout=["FHW", None, None])
            # fn.per_frame marks blue_positions as a per-frame argument.
            debayered_vid = fn.experimental.debayer(bayered_vid.gpu(),
                                                    blue_position=fn.per_frame(blue_positions))
            return debayered_vid, idxs

        pipe = debayer_pipeline(batch_size=batch_size, device_id=0, num_threads=4)
        pipe.build()
        out_batches = []
        for _ in range(num_iterations):
            debayered_dev, idxs = pipe.run()
            assert debayered_dev.layout() == "FHWC"
            out_batches.append(
                ([np.array(vid) for vid in debayered_dev.as_cpu()], [np.array(idx)
                                                                     for idx in idxs]))
        for debayered_videos, idxs in out_batches:
            assert len(debayered_videos) == len(idxs)
            for vid_debayered, idx in zip(debayered_videos, idxs):
                baseline = self.npp_baseline[idx]
                assert np.all(vid_debayered == baseline)
def source_full_array(shape, dtype):
    """Return an external-source callback producing constant arrays.

    Each produced sample has the given shape/dtype and is filled with the
    sample's ``idx_in_epoch``.
    """
    return lambda sample_info: np.full(shape, sample_info.idx_in_epoch, dtype=dtype)
def _test_shape_pipeline(shape, dtype):
    """Build and run a debayer pipeline over constant arrays of ``shape``."""
    @pipeline_def
    def pipeline():
        bayer_imgs = fn.external_source(source_full_array(shape, dtype), batch=False)
        return fn.experimental.debayer(bayer_imgs.gpu(), blue_position=[0, 0])
    pipe = pipeline(batch_size=8, num_threads=4, device_id=0)
    pipe.build()
    pipe.run()
def test_odd_size_error():
    """Images with odd height or width must be rejected."""
    err = "The height and width of the image to debayer must be even"
    with assert_raises(RuntimeError, glob=err):
        _test_shape_pipeline((20, 15), np.uint8)
def test_too_many_channels():
    """Multi-channel inputs must be rejected - debayer expects grayscale."""
    err = " The debayer operator expects grayscale (i.e. single channel) images"
    for shape in ((20, 40, 2), (20, 40, 2, 2)):
        with assert_raises(RuntimeError, glob=err):
            _test_shape_pipeline(shape, np.uint8)
def test_wrong_sample_dim():
    """5-dimensional samples are not supported by the debayer operator."""
    err = "The number of dimensions 5 does not match any of the allowed"
    with assert_raises(RuntimeError, glob=err):
        _test_shape_pipeline((1, 1, 1, 1, 1), np.uint8)
def test_no_blue_position_specified():
    """Omitting the mandatory ``blue_position`` argument must raise."""
    with assert_raises(RuntimeError, glob="Not all required arguments were specified"):
        @pipeline_def
        def pipeline():
            bayer_imgs = fn.external_source(source_full_array((20, 20), np.uint8), batch=False)
            return fn.experimental.debayer(bayer_imgs.gpu())
        pipe = pipeline(batch_size=8, num_threads=4, device_id=0)
        pipe.build()
        pipe.run()
@params(((2, 2), ), ((1, 2), ), ((-1, 0), ))
def test_blue_position_outside_of_2x2_tile(blue_position):
    """``blue_position`` coordinates outside the 2x2 bayer tile must raise."""
    with assert_raises(RuntimeError, glob="The `blue_position` position must lie within 2x2 tile"):
        @pipeline_def
        def pipeline():
            bayer_imgs = fn.external_source(source_full_array((20, 20), np.uint8), batch=False)
            return fn.experimental.debayer(bayer_imgs.gpu(), blue_position=blue_position)
        pipe = pipeline(batch_size=8, num_threads=4, device_id=0)
        pipe.build()
        pipe.run()
|
DALI-main
|
dali/test/python/operator_1/test_debayer.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
from test_utils import check_batch
from nose_utils import raises
batch_sizes = [5, 256, 128, 7]
max_batch_size = max(batch_sizes)
def input_batch(num_dim):
    """Yield batches (sized per module-level ``batch_sizes``) of random arrays.

    Every sample has ``num_dim`` dimensions with extents drawn from [0, 50).
    """
    rng = np.random.default_rng(42)

    def random_sample():
        # Draw the shape first, then the data - the call order matters for
        # reproducibility of the random stream.
        shape = rng.integers(low=0, high=50, size=num_dim)
        return rng.random(shape)

    for bs in batch_sizes:
        yield [random_sample() for _ in range(bs)]
def run_pipeline(device, num_dim, replace=False, layout=None):
    """Run fn.per_frame over generated batches and verify data and layout.

    ``replace=True`` replaces an existing layout with a sequence layout;
    ``layout=None`` means the input carries no layout.
    """
    @pipeline_def
    def pipeline():
        arg = fn.external_source(input_batch(num_dim), layout=layout)
        if device == "gpu":
            arg = arg.gpu()
        return fn.per_frame(arg, replace=replace, device=device)
    pipe = pipeline(num_threads=4, batch_size=max_batch_size, device_id=0)
    pipe.build()
    # Expected output layout: 'F' followed by the remaining layout characters
    # (wildcards when the input had no layout).
    expected_layout = "F" + "*" * (num_dim - 1) if layout is None else "F" + layout[1:]
    for baseline in input_batch(num_dim):
        (out,) = pipe.run()
        check_batch(out, baseline, len(baseline), expected_layout=expected_layout)
def test_set_layout():
    """per_frame infers an 'F' + wildcards layout when the input has none."""
    for dev in ("cpu", "gpu"):
        for ndim in (1, 2, 3):
            yield run_pipeline, dev, ndim
def test_replace_layout():
    """replace=True overwrites the leading dim of an existing layout with 'F'."""
    for dev in ("cpu", "gpu"):
        for ndim in (1, 2, 3):
            yield run_pipeline, dev, ndim, True, "XYZ"[:ndim]
def test_verify_layout():
    """Layouts that already start with 'F' are accepted as-is."""
    for dev in ("cpu", "gpu"):
        for ndim in (1, 2, 3):
            yield run_pipeline, dev, ndim, False, "FYZ"[:ndim]
def test_zero_dim_not_allowed():
    """0-D (scalar) samples cannot be marked as sequences."""
    expected_msg = "Cannot mark zero-dimensional input as a sequence"
    for dev in ("cpu", "gpu"):
        checked = raises(RuntimeError, expected_msg)(run_pipeline)
        yield checked, dev, 0
# The operator must reject inputs whose layout does not start with 'F' (frames).
@raises(RuntimeError, "Per-frame argument input must be a sequence. "
        "The input layout should start with 'F'")
def _test_not_a_sequence_layout(device, num_dim, layout):
    run_pipeline(device, num_dim=num_dim, layout=layout)
def test_not_a_sequence_layout():
    """Non-'F'-leading layouts must be rejected for every dimensionality."""
    for dev in ("cpu", "gpu"):
        for ndim in (1, 2, 3):
            yield _test_not_a_sequence_layout, dev, ndim, "XYZ"[:ndim]
def _test_pass_through():
    """Check that per_frame does not corrupt its input (pass-through behavior).

    The uniform range comes from an external source that changes every
    iteration; a broken pass-through could yield values outside [i, i + 1).
    """
    @pipeline_def
    def pipeline():
        rng = fn.external_source(lambda info: np.array(
            [info.iteration, info.iteration + 1], dtype=np.float32), batch=False)
        return fn.per_frame(fn.random.uniform(range=rng, device="gpu", shape=(1, 1, 1), seed=42))
    pipe = pipeline(batch_size=1, num_threads=4, device_id=0)
    pipe.build()
    for i in range(5):
        (out,) = pipe.run()
        [sample] = [np.array(s) for s in out.as_cpu()]
        # At iteration i the requested uniform range is [i, i + 1).
        assert i <= sample[0] < i + 1
def test_pass_through():
    """Repeat many times - a broken pass-through does not fail deterministically."""
    repeats = 50
    for _ in range(repeats):
        _test_pass_through()
|
DALI-main
|
dali/test/python/operator_1/test_per_frame.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.types as types
from random import shuffle
import numpy as np
from test_utils import as_array
import os
import cv2
# Test data location (DALI_EXTRA_PATH must point at a DALI_extra checkout).
test_data_root = os.environ['DALI_EXTRA_PATH']
images_dir = os.path.join(test_data_root, 'db', 'single', 'tiff')
# Debug switches: dump all compared images, or only failing comparisons.
dump_images = False
dump_broken = False
# Number of extra frames appended after the first one in generated sequences.
sequence_length = 10
class InputImagesIter(object):
    """Per-sample iterator that builds video-like sequences from still images.

    Each ``__next__`` stacks frames (all resized to the first frame's shape)
    into an (F, H, W, C) array in RGB channel order.
    NOTE(review): it yields ``sequence_length + 1`` frames (the first plus
    ``sequence_length`` more) - confirm this off-by-one is intended.
    """

    def __init__(self, sequence_length):
        self.sequence_length = sequence_length
        # A bunch of images to be used as frames of a sequence
        filenames = [
            'cat-3449999_640.tiff',
            'cat-1046544_640.tiff',
            'cat-1245673_640.tiff',
            'cat-300572_640.tiff',
            'cat-111793_640.tiff',
            'domestic-cat-726989_640.tiff',
            'cat-3504008_640.tiff',
            'cat-3591348_640.tiff',
            'cat-2184682_640.tiff',
            'cat-3113513_640.tiff',
        ]
        self.files = [os.path.join(images_dir, '0', filename) for filename in filenames]
        shuffle(self.files)
        # Start position; also reset by __iter__ so the object can be re-iterated.
        # (Previously only __iter__ set it, so a bare __next__ call would fail.)
        self.i = 0

    def _load_next(self):
        in_img = None
        # Skip input image if format isn't supported by OpenCV
        while in_img is None:
            filename = self.files[self.i]
            # self.files already holds full paths; the previous re-join with
            # images_dir only worked because os.path.join discards its first
            # argument when the second one is absolute.
            in_img = cv2.imread(filename)
            self.i = (self.i + 1) % len(self.files)
        # Convert to rgb, to match dali channel order
        rgb = cv2.cvtColor(in_img, cv2.COLOR_BGR2RGB)
        return rgb

    def __iter__(self):
        self.i = 0
        return self

    def __next__(self):
        first = self._load_next()
        seq = [first]
        for _ in range(self.sequence_length):
            img = self._load_next()
            if img.shape != first.shape:
                # All frames must have equal shapes to be stackable.
                img = cv2.resize(img, (first.shape[1], first.shape[0]))
            seq.append(img)
        return np.stack(seq)
def _compare_to_cv_distortion(in_img, out_img, q, no):
    """Compare DALI's jpeg distortion result against an OpenCV encode/decode cycle.

    Args:
        in_img: original RGB image (HWC).
        out_img: DALI-distorted RGB image.
        q: JPEG quality used for the distortion.
        no: (sample_idx, frame_idx) pair used only to name dumped debug images.
    """
    # OpenCV works in BGR, DALI in RGB - convert before encoding.
    bgr = cv2.cvtColor(in_img, cv2.COLOR_RGB2BGR)
    encode_params = [int(cv2.IMWRITE_JPEG_QUALITY), q]
    _, encoded_img = cv2.imencode('.jpg', bgr, params=encode_params)
    decoded_img_bgr = cv2.imdecode(encoded_img, cv2.IMREAD_COLOR)
    decoded_img = cv2.cvtColor(decoded_img_bgr, cv2.COLOR_BGR2RGB)
    # JPEG encoders may differ slightly; accept a small mean absolute difference.
    diff = cv2.absdiff(out_img, decoded_img)
    diff_in_range = np.average(diff) < 5
    if dump_images or (dump_broken and not diff_in_range):
        i, j = no
        cv2.imwrite(f"./reference_q{q}_sample{i}_{j}.bmp",
                    cv2.cvtColor(decoded_img, cv2.COLOR_BGR2RGB))
        cv2.imwrite(f"./output_q{q}_sample{i}_{j}.bmp", cv2.cvtColor(out_img, cv2.COLOR_BGR2RGB))
    assert diff_in_range, f"Absolute difference with the reference is too big: {np.average(diff)}"
def _testimpl_jpeg_compression_distortion(batch_size, device, quality, layout):
    """Check fn.jpeg_compression_distortion against the OpenCV reference.

    Handles both single images ('HWC') and sequences ('FHWC'); quality may be
    fixed or None, in which case a random per-sample quality is drawn.
    """
    @pipeline_def(batch_size=batch_size, num_threads=3, device_id=0)
    def jpeg_distortion_pipe(device='cpu', quality=None):
        if layout == 'FHWC':
            iii = InputImagesIter(sequence_length)
            in_tensors = fn.external_source(source=iii, layout='FHWC', batch=False)
        else:
            encoded, _ = fn.readers.file(file_root=images_dir)
            in_tensors = fn.decoders.image(encoded, device='cpu')
        inputs = in_tensors.gpu() if device == 'gpu' else in_tensors
        if quality is None:
            # Random per-sample quality; returned so the reference can use it.
            quality = fn.random.uniform(range=[1, 99], dtype=types.INT32)
        out_tensors = fn.jpeg_compression_distortion(inputs, quality=quality)
        return (out_tensors, in_tensors, quality)
    # NOTE: batch_size/num_threads here override the decorator's values above.
    pipe = jpeg_distortion_pipe(device=device, quality=quality, batch_size=batch_size,
                                num_threads=2, device_id=0)
    pipe.build()
    for _ in range(3):
        out = pipe.run()
        assert out[0].layout() == layout
        out_data = out[0].as_cpu() if device == 'gpu' else out[0]
        in_data = out[1]
        quality = out[2]
        for i in range(batch_size):
            out_tensor = np.array(out_data[i])
            in_tensor = np.array(in_data[i])
            q = int(np.array(quality[i]))
            if layout == 'FHWC':
                # Sequences: compare every frame independently.
                for j in range(in_tensor.shape[0]):
                    _compare_to_cv_distortion(in_tensor[j], out_tensor[j], q, (i, j))
            else:
                _compare_to_cv_distortion(in_tensor, out_tensor, q, (i, 0))
def test_jpeg_compression_distortion():
    """Sweep batch sizes, backends, qualities and layouts."""
    for bs in (1, 15):
        for dev in ('cpu', 'gpu'):
            for q in (2, None, 50):
                for lay in ('HWC', 'FHWC'):
                    yield _testimpl_jpeg_compression_distortion, bs, dev, q, lay
def _testimpl_jpeg_compression_distortion_sequence(batch_size, device, seq_len, quality):
    """Distorting a whole sequence must equal distorting each frame separately.

    For every frame index the pipeline emits a pair of outputs that must match:
    a slice of the distorted sequence, and the distortion of the input slice.
    """
    @pipeline_def(batch_size=batch_size, num_threads=3, device_id=0)
    def jpeg_distortion_pipe(device='cpu', quality=None):
        iii = InputImagesIter(seq_len)
        inputs = fn.external_source(source=iii, layout='FHWC', batch=False)
        if device == 'gpu':
            inputs = inputs.gpu()
        if quality is None:
            quality = fn.random.uniform(range=[1, 99], dtype=types.INT32)
        tmp = fn.jpeg_compression_distortion(inputs, quality=quality)
        outs = []
        for i in range(seq_len):
            # First, slice of the distorted sequence
            outs.append(fn.slice(tmp, axes=(0,), start=(i,), end=(i + 1,)))
            # Second, distorted slice of the input
            slice_in = fn.slice(inputs, axes=(0,), start=(i,), end=(i + 1,))
            outs.append(fn.jpeg_compression_distortion(slice_in, quality=quality))
        return tuple(outs)
    pipe = jpeg_distortion_pipe(device=device, quality=quality)
    pipe.build()
    for _ in range(3):
        out = pipe.run()
        nouts = len(out)
        assert nouts == (2 * seq_len)
        # Outputs come in (sequence-slice, distorted-slice) pairs.
        for i in range(0, nouts, 2):
            for s in range(batch_size):
                out_data1 = as_array(out[i][s])
                out_data2 = as_array(out[i + 1][s])
                np.testing.assert_array_equal(out_data1, out_data2)
def test_jpeg_compression_distortion_sequence():
    """Sequence/per-frame consistency for several batch sizes, backends, qualities."""
    seq_len = 10
    for bs in (1, 15):
        for dev in ('cpu', 'gpu'):
            for q in (2, None, 50):
                yield (_testimpl_jpeg_compression_distortion_sequence,
                       bs, dev, seq_len, q)
|
DALI-main
|
dali/test/python/operator_1/test_jpeg_compression_distortion.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
import numpy as np
from nose_utils import assert_raises
def get_data(shapes):
    """Return a batch of uninitialized uint8 arrays, one per requested shape."""
    batch = []
    for shape in shapes:
        batch.append(np.empty(shape, dtype=np.uint8))
    return batch
@pipeline_def
def expand_dims_pipe(shapes, axes=None, new_axis_names=None, layout=None):
    """Pipeline feeding uint8 tensors of ``shapes`` into fn.expand_dims."""
    data = fn.external_source(lambda: get_data(shapes), layout=layout, batch=True, device="cpu")
    return fn.expand_dims(data, axes=axes, new_axis_names=new_axis_names)
def _testimpl_expand_dims(axes, new_axis_names, layout, shapes,
                          expected_out_shapes, expected_layout):
    """Run expand_dims_pipe and check output shapes/layout against expectations."""
    batch_size = len(shapes)
    pipe = expand_dims_pipe(batch_size=batch_size, num_threads=1, device_id=0, shapes=shapes,
                            axes=axes, new_axis_names=new_axis_names, layout=layout)
    pipe.build()
    for _ in range(3):
        outs = pipe.run()
        assert outs[0].layout() == expected_layout
        for i in range(batch_size):
            out_arr = np.array(outs[0][i])
            assert out_arr.shape == expected_out_shapes[i]
def test_expand_dims():
    """Shape/layout expectations for various axes / new_axis_names combinations."""
    # Each case: axes, new_axis_names, layout, shapes, expected_shapes, expected_layout
    cases = [
        ([0, 2], "AB", "XYZ", [(10, 20, 30)], [(1, 10, 1, 20, 30)], "AXBYZ"),
        ([0, 3], None, "XYZ", [(10, 20, 30)], [(1, 10, 20, 1, 30)], ""),
        ([3], None, "XYZ", [(10, 20, 30), (100, 200, 300)],
         [(10, 20, 30, 1), (100, 200, 300, 1)], ""),
        ([4, 3], None, "XYZ", [(10, 20, 30), (100, 200, 300)],
         [(10, 20, 30, 1, 1), (100, 200, 300, 1, 1)], ""),
        ([0, 1, 3, 5, 7], "ABCDE", "XYZ", [(11, 22, 33)], [(1, 1, 11, 1, 22, 1, 33, 1)],
         "ABXCYDZE"),
        ([], "", "HW", [(10, 20)], [(10, 20)], "HW"),
        ([0, 1], "", "", [()], [(1, 1)], ""),
        ([0], "", "HW", [(10, 20)], [(1, 10, 20)], ""),
        ([4, 3], "AB", "XYZ", [(10, 20, 30)], [(10, 20, 30, 1, 1)], "XYZBA"),
        ([0], "X", "", [()], [(1,)], "X"),
    ]
    for case in cases:
        yield (_testimpl_expand_dims,) + case
def test_expand_dims_throw_error():
    """Invalid axes / new_axis_names combinations must raise with a clear message."""
    # Each case: axes, new_axis_names, layout, shapes, expected error message regex.
    args = [
        ([4], None, None, [(10, 20, 30)],
            r"Data has not enough dimensions to add new axes at specified indices."),
        ([0, -1], None, None, [(10, 20, 30)],
            r"Axis value can't be negative"),
        ([2, 0, 2], "AB", "XYZ", [(10, 20, 30)],
            r"Specified [\d]+ new dimensions, but layout contains only [\d]+ new dimension names"),
        ([2], "C", None, [(10, 20, 30)],
            r"Specifying ``new_axis_names`` requires an input with a proper layout."),
    ]
    for axes, new_axis_names, layout, shapes, err_msg in args:
        pipe = expand_dims_pipe(batch_size=len(shapes), num_threads=1, device_id=0, shapes=shapes,
                                axes=axes, new_axis_names=new_axis_names, layout=layout)
        # The error may surface either at build time or at run time.
        with assert_raises(RuntimeError, regex=err_msg):
            pipe.build()
            pipe.run()
|
DALI-main
|
dali/test/python/operator_1/test_expand_dims.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import random
import numpy as np
import os
from test_utils import get_dali_extra_path
from test_noise_utils import PSNR
test_data_root = get_dali_extra_path()
images_dir = os.path.join(test_data_root, 'db', 'single', 'png')
dump_images = False
def salt_and_pepper_noise_ref(x, prob, salt_vs_pepper, per_channel, salt_val, pepper_val):
    """NumPy reference for salt-and-pepper noise.

    Each pixel (or each channel value, when ``per_channel``) becomes salt_val
    with probability prob * salt_vs_pepper, pepper_val with probability
    prob * (1 - salt_vs_pepper), and stays unchanged otherwise.
    """
    data = np.array(x, dtype=np.float32)
    p_salt = prob * salt_vs_pepper
    p_pepper = prob * (1.0 - salt_vs_pepper)
    channels = data.shape[-1]
    mask_shape = data.shape if per_channel else data.shape[:-1]
    # NaN in the mask marks "keep the original value".
    mask = np.random.choice(
        [pepper_val, np.nan, salt_val], p=[p_pepper, 1 - prob, p_salt], size=mask_shape)
    if not per_channel:
        # Replicate the per-pixel decision across all channels.
        mask = np.stack([mask] * channels, axis=-1)
    return np.where(np.isnan(mask), data, mask).astype(np.uint8)
@pipeline_def
def pipe_salt_and_pepper_noise(prob, salt_vs_pepper, channel_first, per_channel,
                               salt_val, pepper_val, device='cpu'):
    """Pipeline applying fn.noise.salt_and_pepper to decoded RGB images.

    Returns the clean input, the noisy output and the (possibly randomized)
    prob / salt_vs_pepper arguments actually used per sample.
    """
    encoded, _ = fn.readers.file(file_root=images_dir)
    in_data = fn.decoders.image(encoded, output_type=types.RGB)
    if device == 'gpu':
        in_data = in_data.gpu()
    if channel_first:
        in_data = fn.transpose(in_data, perm=[2, 0, 1])
    # None arguments are replaced with random per-sample values.
    # NOTE(review): `or` also treats 0/0.0 as "unset" - confirm callers never pass 0.
    prob_arg = prob or fn.random.uniform(range=(0.05, 0.5))
    salt_vs_pepper_arg = salt_vs_pepper or fn.random.uniform(range=(0.25, 0.75))
    out_data = fn.noise.salt_and_pepper(
        in_data, per_channel=per_channel, prob=prob_arg, salt_vs_pepper=salt_vs_pepper_arg,
        salt_val=salt_val, pepper_val=pepper_val
    )
    return in_data, out_data, prob_arg, salt_vs_pepper_arg
def verify_salt_and_pepper(output, input, prob, salt_vs_pepper, per_channel, salt_val, pepper_val):
    """Statistically validate salt & pepper noise applied to ``input``.

    Checks that every pixel is either untouched, salt or pepper, and that the
    measured noise probability and salt/pepper ratio match the requested ones.
    """
    assert output.shape == input.shape
    height, width, nchannels = output.shape
    if per_channel:
        # Treat every channel value as an independent "pixel".
        flat = height * width * nchannels
        output = np.reshape(output, (flat, 1))
        input = np.reshape(input, (flat, 1))
    unchanged = np.all(output == input, axis=-1)
    is_pepper = np.all(output == pepper_val, axis=-1)
    is_salt = np.all(output == salt_val, axis=-1)
    # Count only pixels whose original value was neither salt nor pepper.
    # Otherwise the measured salt/pepper percentages would be skewed on
    # images with many black or white pixels.
    countable = np.logical_and(np.all(input != pepper_val, axis=-1),
                               np.all(input != salt_val, axis=-1))
    salt_count = np.count_nonzero(np.logical_and(is_salt, countable))
    pepper_count = np.count_nonzero(np.logical_and(is_pepper, countable))
    pixel_count = np.count_nonzero(countable)
    # Every pixel must be untouched or set to one of the two noise values.
    assert np.logical_or(unchanged, np.logical_or(is_salt, is_pepper)).all()
    measured_prob = (pepper_count + salt_count) / pixel_count
    measured_ratio = salt_count / (salt_count + pepper_count)
    np.testing.assert_allclose(measured_prob, prob, atol=1e-2)
    np.testing.assert_allclose(measured_ratio, salt_vs_pepper, atol=1e-1)
def _testimpl_operator_noise_salt_and_pepper(device, per_channel, prob, salt_vs_pepper,
                                             channel_first, salt_val, pepper_val, batch_size,
                                             niter):
    """Run the salt & pepper pipeline and validate outputs statistically and via PSNR."""
    pipe = pipe_salt_and_pepper_noise(prob, salt_vs_pepper, channel_first, per_channel,
                                      salt_val, pepper_val,
                                      device=device, batch_size=batch_size,
                                      num_threads=3, device_id=0, seed=12345)
    pipe.build()
    # Operator defaults when the values were not specified explicitly.
    salt_val = 255 if salt_val is None else salt_val
    pepper_val = 0 if pepper_val is None else pepper_val
    for _ in range(niter):
        # NOTE(review): the pipeline returns (in, out, ...) but the names here are
        # swapped; the second swap below (sample_in taken from out_data) restores
        # the correct pairing, so the net effect is correct - confirm before renaming.
        out_data, in_data, prob_arg, salt_vs_pepper_arg = pipe.run()
        prob_arg = prob_arg.as_array()
        salt_vs_pepper_arg = salt_vs_pepper_arg.as_array()
        if device == 'gpu':
            out_data = out_data.as_cpu()
            in_data = in_data.as_cpu()
        for s in range(batch_size):
            sample_in = np.array(out_data[s])
            sample_out = np.array(in_data[s])
            if channel_first:  # Convert back to channel-last before verifying
                sample_out = np.transpose(sample_out, axes=(1, 2, 0))
                sample_in = np.transpose(sample_in, axes=(1, 2, 0))
            prob = float(prob_arg[s])
            salt_vs_pepper = float(salt_vs_pepper_arg[s])
            sample_ref = salt_and_pepper_noise_ref(
                sample_in, prob, salt_vs_pepper, per_channel, salt_val, pepper_val)
            # The output should be about as far from the input as the reference is.
            psnr_out = PSNR(sample_out, sample_in)
            psnr_ref = PSNR(sample_ref, sample_in)
            if dump_images:
                import cv2
                suffix_str = f"{prob}_{salt_vs_pepper}_s{s}"
                if not per_channel:
                    suffix_str = suffix_str + "_monochrome"
                cv2.imwrite(
                    f"./snp_noise_ref_p{suffix_str}.png",
                    cv2.cvtColor(sample_ref, cv2.COLOR_BGR2RGB))
                cv2.imwrite(
                    f"./snp_noise_out_p{suffix_str}.png",
                    cv2.cvtColor(sample_out, cv2.COLOR_BGR2RGB))
            verify_salt_and_pepper(
                sample_out, sample_in, prob, salt_vs_pepper, per_channel, salt_val, pepper_val)
            np.testing.assert_allclose(psnr_out, psnr_ref, atol=1)
def test_operator_noise_salt_and_pepper():
    """Sweep devices and modes; salt/pepper ratio and batch size are randomized."""
    niter = 3
    probs = [None, 0.021, 0.5]
    salt_and_pepper_probs = [None, 1.0, 0.5, 0.0]
    for device in ("cpu", "gpu"):
        for per_channel in (False, True):
            for channel_first in (False, True):
                for pepper_val, salt_val in ((None, None), (10, 50)):
                    for prob in probs:
                        svp = random.choice(salt_and_pepper_probs)
                        bs = random.choice([1, 3])
                        yield (_testimpl_operator_noise_salt_and_pepper, device,
                               per_channel, prob, svp, channel_first,
                               salt_val, pepper_val, bs, niter)
|
DALI-main
|
dali/test/python/operator_1/test_noise_salt_and_pepper.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import numpy as np
import os
from test_utils import get_dali_extra_path
# Debug switches: verbosity level and interactive image display.
DEBUG_LVL = 0
SHOW_IMAGES = False
# Fixed seed so the randomly generated paste layouts are reproducible.
np.random.seed(1234)
data_root = get_dali_extra_path()
img_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
# Mapping from DALI dtypes to the numpy types used by the reference check.
np_type_map = {
    types.UINT8: np.uint8,
    types.UINT16: np.uint16,
    types.UINT32: np.uint32,
    types.UINT64: np.uint64,
    types.FLOAT16: np.float16,
    types.FLOAT: np.float32,
    types.FLOAT64: np.float64,
    types.INT8: np.int8,
    types.INT16: np.int16,
    types.INT32: np.int32,
    types.INT64: np.int64,
}
def intersects(anchors1, shapes1, anchors2, shapes2):
    """Return True when two axis-aligned boxes overlap in every dimension."""
    for a1, s1, a2, s2 in zip(anchors1, shapes1, anchors2, shapes2):
        # Separated along any single axis means no overlap at all.
        if a1 + s1 <= a2 or a2 + s2 <= a1:
            return False
    return True
def prepare_cuts(
    iters=4,
    batch_size=16,
    input_size=None,
    output_size=None,
    even_paste_count=False,
    no_intersections=False,
    full_input=False,
    in_anchor_top_left=False,
    in_anchor_range=None,
    out_anchor_top_left=False,
    out_anchor_range=None,
    out_of_bounds_count=0,
):
    """Randomly generate multi_paste specs: per-output lists of input indices,
    input anchors, paste shapes and output anchors.

    ``no_intersections`` retries until pastes within one output are disjoint;
    ``out_of_bounds_count`` afterwards corrupts that many anchors so that they
    point outside the input/output canvas (to exercise bounds checking).
    """
    # Those two will not work together
    assert out_of_bounds_count == 0 or not no_intersections
    in_idx_l = [np.zeros(shape=(0,), dtype=np.int32) for _ in range(batch_size)]
    in_anchors_l = [np.zeros(shape=(0, 2), dtype=np.int32) for _ in range(batch_size)]
    shapes_l = [np.zeros(shape=(0, 2), dtype=np.int32) for _ in range(batch_size)]
    out_anchors_l = [np.zeros(shape=(0, 2), dtype=np.int32) for _ in range(batch_size)]
    assert len(input_size) == len(output_size)
    dim = len(input_size)
    for i in range(batch_size):
        for j in range(iters):
            # Retry loop: only repeats when no_intersections found an overlap.
            while True:
                in_idx = np.int32(np.random.randint(batch_size))
                out_idx = np.int32(i if even_paste_count else np.random.randint(batch_size))
                shape = [np.int32(
                    np.random.randint(
                        min(input_size[i], output_size[i]) // (iters if no_intersections else 1)
                    ) + 1
                ) for i in range(dim)] if not full_input else input_size
                if in_anchor_top_left:
                    in_anchor = [0] * dim
                elif in_anchor_range is not None:
                    in_anchor = [
                        np.int32(np.random.randint(in_anchor_range[0][i], in_anchor_range[1][i]))
                        for i in range(dim)
                    ]
                    if full_input:
                        shape = [np.int32(input_size[i] - in_anchor[i]) for i in range(dim)]
                else:
                    in_anchor = [
                        np.int32(np.random.randint(input_size[i] - shape[i] + 1))
                        for i in range(dim)
                    ]
                if out_anchor_top_left:
                    out_anchor = [0] * dim
                elif out_anchor_range is not None:
                    out_anchor = [
                        np.int32(np.random.randint(out_anchor_range[0][i], out_anchor_range[1][i]))
                        for i in range(dim)
                    ]
                else:
                    out_anchor = [
                        np.int32(np.random.randint(output_size[i] - shape[i] + 1))
                        for i in range(dim)
                    ]
                if no_intersections:
                    is_ok = True
                    for k in range(len(in_idx_l[out_idx])):
                        if intersects(out_anchors_l[out_idx][k], shapes_l[out_idx][k], out_anchor,
                                      shape):
                            is_ok = False
                            break
                    if not is_ok:
                        continue
                    break
                break
            if DEBUG_LVL >= 1:
                print(f"""in_idx: {in_idx}, out_idx: {out_idx}, in_anchor: {
                    in_anchor}, in_shape: {shape}, out_anchor: {out_anchor}""")
            in_idx_l[out_idx] = np.append(in_idx_l[out_idx], [in_idx], axis=0)
            in_anchors_l[out_idx] = np.append(in_anchors_l[out_idx], [in_anchor], axis=0)
            shapes_l[out_idx] = np.append(shapes_l[out_idx], [shape], axis=0)
            out_anchors_l[out_idx] = np.append(out_anchors_l[out_idx], [out_anchor], axis=0)
    # Corrupt some anchors so that the operator's bounds checking triggers.
    for i in range(out_of_bounds_count):
        clip_out_idx = np.random.randint(batch_size)
        while len(in_idx_l[clip_out_idx]) == 0:
            clip_out_idx = np.random.randint(batch_size)
        clip_in_idx = np.random.randint(len(in_idx_l[clip_out_idx]))
        change_in = np.random.randint(2) == 0
        below_zero = np.random.randint(2) == 0
        change_dim_idx = np.random.randint(dim)
        if below_zero:
            anchors = in_anchors_l if change_in else out_anchors_l
            anchors[clip_out_idx][clip_in_idx][change_dim_idx] = np.int32(np.random.randint(-5, 0))
        else:
            anchors = in_anchors_l if change_in else out_anchors_l
            size = input_size if change_in else output_size
            anchors[clip_out_idx][clip_in_idx][change_dim_idx] = np.int32(
                size[change_dim_idx]
                - shapes_l[clip_out_idx][clip_in_idx][change_dim_idx]
                + np.random.randint(5) + 1)
    return in_idx_l, in_anchors_l, shapes_l, out_anchors_l
def get_pipeline(
    batch_size=4,
    in_size=None,
    out_size=None,
    even_paste_count=False,
    k=4,
    dtype=types.UINT8,
    no_intersections=True,
    full_input=False,
    in_anchor_top_left=False,
    in_anchor_range=None,
    out_anchor_top_left=False,
    out_anchor_range=None,
    use_gpu=False,
    num_out_of_bounds=0
):
    """Build a multi_paste pipeline over resized images plus the paste specs.

    Returns (pipeline, in_idx_l, in_anchors_l, shapes_l, out_anchors_l) so the
    caller can reproduce the expected output in numpy.
    """
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0,
                    seed=np.random.randint(12345))
    with pipe:
        input, _ = fn.readers.file(file_root=img_dir)
        decoded = fn.decoders.image(input, device='cpu', output_type=types.RGB)
        resized = fn.resize(decoded, resize_x=in_size[1], resize_y=in_size[0])
        in_idx_l, in_anchors_l, shapes_l, out_anchors_l = prepare_cuts(
            k, batch_size, in_size, out_size, even_paste_count,
            no_intersections, full_input, in_anchor_top_left, in_anchor_range,
            out_anchor_top_left, out_anchor_range, num_out_of_bounds)
        in_idx = fn.external_source(lambda: in_idx_l)
        in_anchors = fn.external_source(lambda: in_anchors_l)
        shapes = fn.external_source(lambda: shapes_l)
        out_anchors = fn.external_source(lambda: out_anchors_l)
        kwargs = {
            "in_ids": in_idx,
            "output_size": out_size,
            "dtype": dtype
        }
        # Optional tensor arguments are omitted to exercise the operator's defaults.
        if not full_input:
            kwargs["shapes"] = shapes
        if not in_anchor_top_left:
            kwargs["in_anchors"] = in_anchors
        if not out_anchor_top_left:
            kwargs["out_anchors"] = out_anchors
        pasted = fn.multi_paste(resized.gpu() if use_gpu else resized, **kwargs)
        pipe.set_outputs(pasted, resized)
    return pipe, in_idx_l, in_anchors_l, shapes_l, out_anchors_l
def verify_out_of_bounds(batch_size, in_idx_l, in_anchors_l, shapes_l, out_anchors_l, in_size,
                         out_size):
    """Return True iff any paste region sticks out of its input or output canvas."""
    for sample in range(batch_size):
        for paste in range(len(in_idx_l[sample])):
            in_anchor = in_anchors_l[sample][paste]
            out_anchor = out_anchors_l[sample][paste]
            shape = shapes_l[sample][paste]
            for d in range(len(in_anchor)):
                if in_anchor[d] < 0 or out_anchor[d] < 0:
                    return True
                if in_anchor[d] + shape[d] > in_size[d]:
                    return True
                if out_anchor[d] + shape[d] > out_size[d]:
                    return True
    return False
def manual_verify(batch_size, inp, output, in_idx_l, in_anchors_l, shapes_l, out_anchors_l,
                  out_size_l, dtype):
    """Recreate every pasted output in numpy and compare with the operator's result."""
    for i in range(batch_size):
        # source_info of a pasted sample is the ';'-joined info of its sources.
        ref_source_info = ";".join([inp[idx].source_info() for idx in in_idx_l[i]])
        assert output[i].source_info() == ref_source_info, \
            f"{output[i].source_info()} == {ref_source_info}"
        out = output.at(i)
        out_size = out_size_l[i]
        assert out.shape == out_size
        ref = np.zeros(out.shape)
        for j, idx in enumerate(in_idx_l[i]):
            roi_start = in_anchors_l[i][j]
            roi_end = roi_start + shapes_l[i][j]
            out_start = out_anchors_l[i][j]
            out_end = out_start + shapes_l[i][j]
            # Pastes are applied in order, so later ones overwrite earlier ones.
            ref[out_start[0]:out_end[0],
                out_start[1]:out_end[1]] = inp.at(idx)[roi_start[0]:roi_end[0],
                                                       roi_start[1]:roi_end[1]]
        ref = ref.astype(np_type_map[dtype])
        if DEBUG_LVL > 0 and not np.array_equal(out, ref):
            print(f"Error on image {i}")
            import PIL.Image
            PIL.Image.fromarray(out).save("multipaste_out.png")
            PIL.Image.fromarray(ref).save("multipaste_ref.png")
        assert np.array_equal(out, ref)
def show_images(batch_size, image_batch):
    """Debug helper: display the whole batch in a grid (used when SHOW_IMAGES is set).

    matplotlib is imported lazily so the test suite has no hard dependency on it.
    """
    import matplotlib.gridspec as gridspec
    import matplotlib.pyplot as plt
    columns = 4
    # Round up so every sample gets a cell; the previous (batch_size + 1) // columns
    # under-allocated rows (e.g. for batch_size == 5).
    rows = (batch_size + columns - 1) // columns
    gs = gridspec.GridSpec(rows, columns)
    # Iterate only over existing samples - the previous rows * columns range
    # indexed past the end of the batch; trailing grid cells simply stay empty.
    for j in range(batch_size):
        plt.subplot(gs[j])
        plt.axis("off")
        plt.imshow(image_batch.at(j))
    # Show the complete figure once, instead of popping a window per subplot.
    plt.show()
def check_operator_multipaste(bs, pastes, in_size, out_size, even_paste_count, no_intersections,
                              full_input, in_anchor_top_left, in_anchor_range, out_anchor_top_left,
                              out_anchor_range, out_dtype, num_out_of_bounds, device):
    """Build and run a multi_paste pipeline, then verify the result.

    Out-of-bounds paste configurations are expected to raise; any other
    RuntimeError is re-raised unchanged so its message is not lost.
    """
    pipe, in_idx_l, in_anchors_l, shapes_l, out_anchors_l = get_pipeline(
        batch_size=bs,
        in_size=in_size,
        out_size=out_size,
        even_paste_count=even_paste_count,
        k=pastes,
        dtype=out_dtype,
        no_intersections=no_intersections,
        full_input=full_input,
        in_anchor_top_left=in_anchor_top_left,
        in_anchor_range=in_anchor_range,
        out_anchor_top_left=out_anchor_top_left,
        out_anchor_range=out_anchor_range,
        num_out_of_bounds=num_out_of_bounds,
        use_gpu=device == 'gpu'
    )
    pipe.build()
    try:
        result, input = pipe.run()
        r = result.as_cpu() if device == 'gpu' else result
        if SHOW_IMAGES:
            show_images(bs, r)
        # A successful run implies no paste was out of bounds.
        assert not verify_out_of_bounds(bs, in_idx_l, in_anchors_l, shapes_l, out_anchors_l,
                                        in_size, out_size)
        manual_verify(bs, input, r, in_idx_l, in_anchors_l, shapes_l, out_anchors_l,
                      [out_size + (3, )] * bs, out_dtype)
    except RuntimeError as e:
        if "Paste in/out coords should be within input/output bounds" in str(e):
            # The operator rejected the configuration - it must really be invalid.
            assert verify_out_of_bounds(bs, in_idx_l, in_anchors_l, shapes_l, out_anchors_l,
                                        in_size, out_size)
        else:
            # Unexpected failure: re-raise with the original traceback instead of
            # masking it behind a bare `assert False`.
            raise
def test_operator_multipaste():
    """Exercise multi_paste across shapes, dtypes and out-of-bounds scenarios."""
    in_anchor = ((10, 10), (20, 20))
    # Each case:
    #   batch size, avg pastes per output, input dims, output dims,
    #   equal paste count per output?, forbid intersections?,
    #   omit "shapes"? (cover from input anchor to input end),
    #   omit "in_anchors"?, optional in_anchor range ((xmin, ymin), (xmax, ymax)),
    #   omit "out_anchors"?, optional out_anchor range,
    #   output dtype, number of out-of-bounds anchor changes
    cases = [
        [4, 2, (128, 256), (128, 128), False, False, False, False, None, False, None, types.UINT8, 0],  # noqa: 501
        [4, 2, (256, 128), (128, 128), False, True, False, False, None, False, None, types.UINT8, 0],  # noqa: 501
        [4, 2, (128, 128), (256, 128), True, False, False, False, None, False, None, types.UINT8, 0],  # noqa: 501
        [4, 2, (128, 128), (128, 256), True, True, False, False, None, False, None, types.UINT8, 0],  # noqa: 501
        [4, 2, (64, 64), (128, 128), False, False, True, False, None, False, None, types.UINT8, 0],  # noqa: 501
        [4, 2, (64, 64), (128, 128), False, False, True, False, in_anchor, False, None, types.UINT8, 0],  # noqa: 501
        [4, 2, (64, 64), (128, 128), False, False, False, True, None, False, None, types.UINT8, 0],  # noqa: 501
        [4, 2, (64, 64), (128, 128), False, False, False, False, None, True, None, types.UINT8, 0],  # noqa: 501
        [4, 2, (128, 128), (128, 128), False, False, False, False, None, False, None, types.UINT8, 0],  # noqa: 501
        [4, 2, (128, 128), (128, 128), False, False, False, False, None, False, None, types.INT16, 0],  # noqa: 501
        [4, 2, (128, 128), (128, 128), False, False, False, False, None, False, None, types.INT32, 0],  # noqa: 501
        [4, 2, (128, 128), (128, 128), False, False, False, False, None, False, None, types.FLOAT, 0],  # noqa: 501
        [4, 2, (128, 256), (128, 128), False, False, False, False, None, False, None, types.UINT8, 4],  # noqa: 501
    ]
    for case in cases:
        for dev in ("cpu", "gpu"):
            yield (check_operator_multipaste, *case, dev)
|
DALI-main
|
dali/test/python/operator_1/test_multipaste.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
from nvidia.dali import backend
import nvidia.dali.ops as ops
import numpy as np
from test_utils import dali_type
def normalize(x, axes=None, mean=None, stddev=None, ddof=0, eps=0):
    """Per-sample reference normalization: (x - mean) / stddev.

    Args:
        x: input array.
        axes: axes to reduce over when computing statistics (None/empty = all).
        mean, stddev: externally supplied statistics; computed from ``x`` when None.
        ddof: delta degrees of freedom for the stddev estimate.
        eps: regularization added to the variance before the square root.

    Division-by-zero / NaN results are mapped to 0, matching the operator.
    """
    if type(axes) is list:
        axes = tuple(axes)
    num_reduced = np.prod([x.shape[a] for a in axes]) if axes else x.size
    if mean is None:
        mean = x.mean(axis=axes, keepdims=True)
    if stddev is None and eps == 0 and num_reduced > ddof:
        stddev = np.std(x, axis=axes, ddof=ddof, keepdims=True)
    if stddev is None:
        # Manual variance so that eps can be added before the square root.
        factor = num_reduced - ddof
        # np.float was removed in NumPy 1.24; np.float64 matches the old alias.
        sqr = (x - mean).astype(np.float64)**2
        var = np.sum(sqr, axis=axes, keepdims=True)
        if factor > 0:
            var /= factor
        else:
            var *= 0
        stddev = np.sqrt(var + eps)
    elif eps:
        stddev = np.sqrt(stddev**2 + eps)
    with np.errstate(divide='ignore', invalid='ignore'):
        norm = (x - mean) / stddev
    return np.nan_to_num(norm, copy=False, nan=0, posinf=0, neginf=0)
def batch_reduced_vol(batch, axes):
    """Total number of elements reduced over, summed across the whole batch."""
    if axes is None:
        # All elements of every sample are reduced.
        total = 0
        for sample in batch:
            total += np.prod(sample.shape)
        return total
    total = 0
    for sample in batch:
        vol = 1
        for axis in axes:
            vol *= sample.shape[axis]
        total += vol
    return total
# calculate mean over whole batch
def batch_mean(batch, axes):
    """Mean over the reduced axes of all batch samples taken together."""
    acc = None
    for sample in batch:
        partial = np.sum(sample, axis=axes, keepdims=True)
        acc = partial if acc is None else acc + partial
    return acc / batch_reduced_vol(batch, axes)
# calculate standard deviation over whole batch
def batch_stddev(batch, axes, mean, ddof=0, eps=0):
    """Stddev (around ``mean``) over the reduced axes of the whole batch."""
    var = None
    for sample in batch:
        sq = np.sum((sample - mean)**2, axis=axes, keepdims=True)
        var = sq if var is None else var + sq
    denom = batch_reduced_vol(batch, axes) - ddof
    if denom > 0:
        var /= denom
    else:
        # Not enough elements for the requested ddof - define the variance as 0.
        var *= 0
    return np.sqrt(var + eps)
def batch_norm(in_batch, axes=None, mean=None, stddev=None, ddof=0, eps=0):
    """Normalize the batch as a whole: statistics are shared across samples.

    Non-reduced dimensions must have the same extent in every batch item.
    """
    if type(axes) is list:
        axes = tuple(axes)
    if mean is None:
        mean = batch_mean(in_batch, axes)
    if stddev is None:
        stddev = batch_stddev(in_batch, axes, mean, ddof, eps)
    elif eps:
        stddev = np.sqrt(stddev * stddev + eps)
    normalized = []
    for sample in in_batch:
        with np.errstate(divide='ignore', invalid='ignore'):
            res = (sample - mean) / stddev
        # Division-by-zero artifacts are mapped to 0, same as the operator does.
        normalized.append(np.nan_to_num(res, copy=False, nan=0, posinf=0, neginf=0))
    return normalized
def generate_data(dims, batch_size, batch_norm, axes, dtype=None):
    """
    Generate random tensors with given dimensionality.
    If batch_norm is True, the extents of non-reduced axes are made equal
    across all samples (whole-batch normalization requires it).
    If not using batch_norm, the axes argument is ignored.
    """
    # NOTE(review): the `batch_norm` parameter shadows the module-level
    # batch_norm() function inside this body; here it is a bool flag.
    shapes = np.random.randint(1, 10, [batch_size, dims], dtype=int)
    if batch_norm and axes is not None:
        for i in range(1, batch_size):
            for a in range(dims):
                if a not in axes:
                    shapes[i, a] = shapes[0, a]
    shapes = shapes.tolist()
    scale = 1
    if dtype is None:
        dtype = np.float32
    elif dtype is not np.float32:
        # Non-float outputs get a wider value range so rounding is meaningful.
        scale = 255
    return [
        (scale * (np.random.rand(*s).astype(np.float32) * (1 + i) - i)).astype(dtype)
        for i, s in enumerate(shapes)]
def custom_mean(batch_norm, axes):
    """Build a custom mean source, deliberately biased so it differs from the true mean."""
    bias = 0.3  # make the result purposefully slightly off
    if type(axes) is list:
        axes = tuple(axes)
    if batch_norm:
        def whole_batch_mean(batch):
            # One shared (biased) mean replicated for every sample in the batch.
            biased = batch_mean(batch, axes) + bias
            return [biased.astype(np.float32) for _ in range(len(batch))]
        return whole_batch_mean

    def per_sample_mean(batch):
        return [x.mean(axis=axes, keepdims=True, dtype=np.float32) + bias for x in batch]
    return per_sample_mean
def custom_stddev(batch_norm, axes):
    """Build a stddev callback for ops.PythonFunction, deliberately biased by a 1.3 factor."""
    bias = 1.3  # make the result purposefully slightly off
    mean_func = custom_mean(batch_norm, axes)
    if type(axes) is list:
        axes = tuple(axes)
    if batch_norm:
        def whole_batch_stddev(batch):
            # Whole-batch stddev around the (biased) whole-batch mean, replicated per sample.
            mean = mean_func(batch)[0][0]
            scaled = bias * batch_stddev(batch, axes, mean)
            return [scaled for _ in batch]
        return whole_batch_stddev

    def per_sample_stddev(batch):
        means = mean_func(batch)
        return [
            bias * np.sqrt(((sample - m) ** 2).mean(axis=axes, keepdims=True))
            for sample, m in zip(batch, means)
        ]
    return per_sample_stddev
def normalize_list(whole_batch, data_batch, axes=None, mean=None, stddev=None, ddof=0, eps=0):
    """Reference normalization of a list of arrays.

    With whole_batch=True the batch is normalized jointly (batch_norm);
    otherwise each sample is normalized independently, broadcasting scalar
    mean/stddev to a per-sample list first.
    """
    if whole_batch:
        return batch_norm(data_batch, axes, mean, stddev, ddof, eps)
    if type(mean) is not list:
        mean = [mean] * len(data_batch)
    if type(stddev) is not list:
        stddev = [stddev] * len(data_batch)
    # np.float was removed in NumPy 1.24 - np.float64 is the equivalent type.
    return [normalize(data_batch[i].astype(np.float64), axes, mean[i], stddev[i], ddof, eps)
            for i in range(len(data_batch))]
def err(l1, l2):
    """Return the maximum absolute elementwise difference between paired arrays."""
    return np.max([np.abs(a - b).max() for a, b in zip(l1, l2)])
def check_float(l1, l2, eps=1e-3):
    """Assert that corresponding arrays are elementwise close (rtol 1e-3, atol eps)."""
    for actual, expected in zip(l1, l2):
        assert np.allclose(actual, expected, rtol=1e-3, atol=eps)
def check_integer(actual, ref, input=None):
    """Assert integer outputs match the reference to within +/-2, saturating the reference."""
    for out, expected in zip(actual, ref):
        info = np.iinfo(out.dtype)
        # actual values are saturated, so we must clip the reference, too
        saturated = np.clip(expected, info.min, info.max)
        assert np.allclose(out, saturated, atol=2)
def shift_scale(batch, shift, scale):
    """Apply the affine transform x * scale + shift to every list element, in place."""
    for idx, sample in enumerate(batch):
        batch[idx] = sample * scale + shift
class NormalizePipeline(Pipeline):
    """Pipeline exercising ops.Normalize in many configurations at once.

    Runs the operator with computed, scalar and externally-supplied mean/stddev
    (the external ones come from deliberately biased PythonFunction callbacks)
    and validates each output against the numpy reference in check_batch.
    """
    def __init__(self, device, batch_size, dims, axes, axis_names, batch=False,
                 out_type=None, in_type=None, shift=None, scale=None,
                 num_threads=3, device_id=0, num_gpus=1):
        # async/pipelined execution disabled because PythonFunction requires it
        super(NormalizePipeline, self).__init__(
            batch_size, num_threads, device_id, seed=7865,
            exec_async=False, exec_pipelined=False)
        common_args = {
            "device": device,
            "axes": axes,
            "axis_names": axis_names,
            "batch": batch,
            "dtype": dali_type(out_type),
            "shift": shift,
            "scale": scale
        }
        self.in_type = in_type
        self.out_type = out_type
        self.device = device
        self.input = ops.ExternalSource()
        self.add_layout = None
        if axis_names is not None:
            # Attach a synthetic layout 'ab...' so axis_names can resolve to axes.
            layout = ''
            for i in range(dims):
                layout += chr(ord('a') + i)
            self.add_layout = ops.Reshape(layout=layout)
        self.batch = batch
        self.dims = dims
        self.has_axes = axes is not None or axis_names is not None
        self.scale = scale
        self.shift = shift
        self.is_integral = out_type is not None and out_type is not np.float32
        if axis_names is not None:
            # Translate axis names back to indices for the numpy reference.
            axes = []
            for a in axis_names:
                axes.append(ord(a) - ord('a'))
        self.axes = axes
        self.axis_names = axis_names
        # Use a non-trivial ddof only when some axes are actually reduced.
        self.ddof = 2 if axes is not None and len(axes) > 0 else 0
        self.eps = 0.25
        # Biased mean/stddev providers - see custom_mean/custom_stddev.
        self.mean = ops.PythonFunction(function=custom_mean(batch, axes), batch_processing=True)
        self.stddev = ops.PythonFunction(function=custom_stddev(batch, axes), batch_processing=True)
        self.normalize = ops.Normalize(**common_args, ddof=self.ddof)
        self.scalar_mean = ops.Normalize(**common_args, mean=1, ddof=self.ddof, epsilon=self.eps)
        self.scalar_stddev = ops.Normalize(**common_args, stddev=2, epsilon=self.eps)
        self.scalar_params = ops.Normalize(**common_args, mean=1, stddev=2)
    def define_graph(self):
        """Build all Normalize variants; externally-fed variants only in per-sample mode."""
        data = self.input_data = self.input()
        if self.add_layout is not None:
            data = self.add_layout(data)
        mean = self.mean(data)
        stddev = self.stddev(data)
        dev_data = data.gpu() if self.device == "gpu" else data
        normalized = self.normalize(dev_data)
        scalar_mean = self.scalar_mean(dev_data)
        scalar_stddev = self.scalar_stddev(dev_data)
        if not self.batch:
            # External mean/stddev tensors are not supported in batch mode.
            ext_mean = self.normalize(dev_data, mean=mean)
            ext_stddev = self.normalize(dev_data, stddev=stddev)
            ext_all = self.normalize(dev_data, mean=mean, stddev=stddev)
            scalar_mean_ext = self.scalar_mean(dev_data, stddev=stddev)
            scalar_stddev_ext = self.scalar_stddev(dev_data, mean=mean)
        if not self.has_axes:
            # Fully scalar parameters only make sense for full reduction.
            scalar_params = self.scalar_params(dev_data)
        out = [data, mean, stddev, normalized, scalar_mean, scalar_stddev]
        if not self.batch:
            out += [ext_mean, ext_stddev, ext_all, scalar_mean_ext, scalar_stddev_ext]
        if not self.has_axes:
            out.append(scalar_params)
        return out
    def check_batch(self, data, mean, stddev, normalized, scalar_mean=None, scalar_stddev=None,
                    ext_mean=None, ext_stddev=None, ext_all=None,
                    scalar_mean_ext=None, scalar_stddev_ext=None, scalar_params=None):
        """Validate each pipeline output against the corresponding numpy reference.

        The argument order must match the output list built in define_graph.
        """
        axes = self.axes
        if type(axes) is list:
            axes = tuple(axes)
        batch = self.batch
        mean_func = custom_mean(batch, axes)
        stddev_func = custom_stddev(batch, axes)
        scale = 1 if self.scale is None else self.scale
        shift = 0 if self.shift is None else self.shift
        def check(l1, l2):
            # Integral outputs are compared with saturation; float with an
            # epsilon that grows with scale and dimensionality.
            if self.is_integral:
                check_integer(l1, l2, data)
            else:
                eps = 1e-3 * scale * len(data[0].shape)
                check_float(l1, l2, eps)
        ref = normalize_list(batch, data, axes, ddof=self.ddof)
        ref_scalar_mean = normalize_list(batch, data, axes, mean=1, ddof=self.ddof, eps=self.eps)
        ref_scalar_stddev = normalize_list(batch, data, axes, stddev=2, eps=self.eps)
        shift_scale(ref, shift, scale)
        shift_scale(ref_scalar_mean, shift, scale)
        shift_scale(ref_scalar_stddev, shift, scale)
        # Recompute the biased mean/stddev the pipeline fed to Normalize.
        mean = mean_func(data)
        stddev = stddev_func(data)
        check(normalized, ref)
        check(scalar_mean, ref_scalar_mean)
        check(scalar_stddev, ref_scalar_stddev)
        if not batch:
            ref_ext_mean = normalize_list(batch, data, axes, mean=mean, ddof=self.ddof)
            ref_ext_stddev = normalize_list(batch, data, axes, stddev=stddev, ddof=self.ddof)
            ref_ext_all = normalize_list(batch, data, axes, mean=mean, stddev=stddev)
            ref_scalar_mean_ext = normalize_list(
                batch, data, axes, mean=1, stddev=stddev, ddof=self.ddof, eps=self.eps)
            ref_scalar_stddev_ext = normalize_list(
                batch, data, axes, mean=mean, stddev=2, eps=self.eps)
            shift_scale(ref_ext_mean, shift, scale)
            shift_scale(ref_ext_stddev, shift, scale)
            shift_scale(ref_ext_all, shift, scale)
            shift_scale(ref_scalar_mean_ext, shift, scale)
            shift_scale(ref_scalar_stddev_ext, shift, scale)
            check(ext_mean, ref_ext_mean)
            check(ext_stddev, ref_ext_stddev)
            check(ext_all, ref_ext_all)
            check(scalar_mean_ext, ref_scalar_mean_ext)
            check(scalar_stddev_ext, ref_scalar_stddev_ext)
        if scalar_params is not None:
            ref_scalar_params = normalize_list(batch, data, axes, mean=1, stddev=2)
            shift_scale(ref_scalar_params, shift, scale)
            check(scalar_params, ref_scalar_params)
    def iter_setup(self):
        # Fresh random batch each iteration; shapes constrained for batch mode.
        data = generate_data(self.dims, self.batch_size, self.batch, self.axes, dtype=self.in_type)
        self.feed_input(self.input_data, data)
def to_list(tensor_list):
    """Convert a DALI TensorList (CPU or GPU) into a list of numpy arrays."""
    if isinstance(tensor_list, backend.TensorListGPU):
        tensor_list = tensor_list.as_cpu()
    return [tensor_list.at(idx) for idx in range(len(tensor_list))]
# Fixed seed so the randomly generated test data is reproducible across runs.
np.random.seed(seed=1337)
def mask2axes(mask):
    """Decode a bitmask into the sorted list of set-bit positions (axis indices)."""
    return [bit for bit in range(mask.bit_length()) if mask >> bit & 1]
def all_axes(dim):
    """Yield None (default axes) followed by every non-empty axis subset of range(dim)."""
    yield None
    yield from (mask2axes(mask) for mask in range(1, 1 << dim))
def _run_test(device, batch_size, dim, axes, axis_names, batch_norm,
              out_type=None, in_type=None, shift=None, scale=None):
    """Build a NormalizePipeline with the given configuration, run two
    iterations and validate every output against the numpy reference."""
    kind = "inter-sample" if batch_norm else "per-sample"
    msg = "{0}, {1}, batch = {2}, dim = {3}".format(device, kind, batch_size, dim)
    if axes is not None:
        msg += " axes = {}".format(axes)
    if axis_names is not None:
        msg += " axis_names = '{}'".format(axis_names)
    if out_type is not None:
        msg += " output = {}".format(out_type)
    if in_type is not None:
        msg += " input = {}".format(in_type)
    print(msg)
    pipe = NormalizePipeline(
        device, batch_size, dim, axes, axis_names, batch_norm, out_type, in_type, shift, scale)
    pipe.build()
    # renamed from 'iter' - do not shadow the builtin
    for _ in range(2):
        out = pipe.run()
        pipe.check_batch(*[to_list(x) for x in out])
def axes2names(axes, layout='abcdefghijklmnopqrstuvwxyz'):
    """Map axis indices to the corresponding letters of the layout string."""
    return "".join(layout[axis] for axis in axes)
def _test_up_to_5D_all_axis_combinations(device):
    """Yield test cases covering 1..5D data with every axis subset, per-sample and batch."""
    batch_size = 5
    for batch_norm in (False, True):
        for dim in range(1, 6):
            for axes in all_axes(dim):
                yield _run_test, device, batch_size, dim, axes, None, batch_norm
                # Also run the axis_names variant where a layout can be attached.
                if axes is not None and dim < 5:
                    yield _run_test, device, batch_size, dim, None, axes2names(axes), batch_norm
def test_cpu_up_to_5D_all_axis_combinations():
    """Run the full axis-combination suite on both backends."""
    for device in ["cpu", "gpu"]:
        yield from _test_up_to_5D_all_axis_combinations(device)
def test_types():
    """Yield normalize tests across input/output type combinations with shift/scale."""
    batch_size = 50
    dim = 4
    axes = [1, 2]
    # out_type/in_type are bound by the loops below; the original pre-assignments
    # (out_type = np.uint8, in_type = None) were dead and have been removed.
    for device in ["cpu", "gpu"]:
        for out_type, scale, shift in [
            (np.uint8, 64, 128), (np.int16, 1000, 0), (np.float32, 0.5, 0.5)
        ]:
            for in_type in [None, np.uint8, np.int16, np.float32]:
                yield _run_test, device, batch_size, dim, axes, None, False, \
                    out_type, in_type, shift, scale
|
DALI-main
|
dali/test/python/operator_1/test_normalize.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import numpy as np
from functools import partial
from test_utils import compare_pipelines
from test_utils import RandomDataIterator
import librosa as librosa
from nose_utils import assert_raises
class MFCCPipeline(Pipeline):
    """Pipeline running the DALI MFCC operator on externally-fed mel-spectrograms."""
    def __init__(self, device, batch_size, iterator, axis=0, dct_type=2, lifter=1.0, n_mfcc=20,
                 norm=None, num_threads=1, device_id=0):
        super(MFCCPipeline, self).__init__(batch_size, num_threads, device_id)
        self.device = device
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.mfcc = ops.MFCC(device=self.device,
                             axis=axis,
                             dct_type=dct_type,
                             lifter=lifter,
                             n_mfcc=n_mfcc,
                             normalize=norm)
    def define_graph(self):
        # Move the input to GPU first when testing the GPU backend.
        self.data = self.inputs()
        out = self.data.gpu() if self.device == 'gpu' else self.data
        out = self.mfcc(out)
        return out
    def iter_setup(self):
        # Pull the next batch from the external iterator supplied at construction.
        data = self.iterator.next()
        self.feed_input(self.data, data)
def mfcc_func(axis, dct_type, lifter, n_mfcc, norm, input_data):
    """Reference MFCC computed with librosa from a 2D mel-spectrogram.

    axis == 1 means the input is time-major and is transposed to the
    frequency-major layout librosa expects (and transposed back afterwards).
    """
    # Librosa works with frequency-major mel-spectrograms
    time_major = axis == 1
    if time_major:
        input_data = np.transpose(input_data)
    assert len(input_data.shape) == 2
    out = librosa.feature.mfcc(
        S=input_data, n_mfcc=n_mfcc, dct_type=dct_type,
        norm='ortho' if norm else None, lifter=lifter)
    # Scipy DCT (used by Librosa) without normalization is scaled by a factor of 2 when comparing
    # with Wikipedia's formula
    if not norm:
        out = out / 2
    # Transpose back the output if necessary
    return np.transpose(out) if time_major else out
class MFCCPythonPipeline(Pipeline):
    """CPU-only reference pipeline computing MFCC via a PythonFunction (librosa)."""
    def __init__(self, device, batch_size, iterator, axis=0, dct_type=2, lifter=1.0, n_mfcc=20,
                 norm=None, num_threads=1, device_id=0, func=mfcc_func):
        # async/pipelined execution disabled because PythonFunction requires it
        super(MFCCPythonPipeline, self).__init__(
            batch_size, num_threads, device_id,
            seed=12345, exec_async=False, exec_pipelined=False)
        # Always runs on CPU regardless of the 'device' argument (kept for signature parity).
        self.device = "cpu"
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        function = partial(func, axis, dct_type, lifter, n_mfcc, norm)
        self.mfcc = ops.PythonFunction(function=function)
    def define_graph(self):
        self.data = self.inputs()
        out = self.mfcc(self.data)
        return out
    def iter_setup(self):
        data = self.iterator.next()
        self.feed_input(self.data, data)
def check_operator_mfcc_vs_python(device, batch_size, input_shape,
                                  axis, dct_type, lifter, n_mfcc, norm):
    """Compare DALI's MFCC operator against the librosa-based python reference."""
    mfcc_args = dict(axis=axis, dct_type=dct_type, lifter=lifter, n_mfcc=n_mfcc, norm=norm)
    dali_data = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    ref_data = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
    compare_pipelines(
        MFCCPipeline(device, batch_size, iter(dali_data), **mfcc_args),
        MFCCPythonPipeline(device, batch_size, iter(ref_data), **mfcc_args),
        batch_size=batch_size, N_iterations=3, eps=1e-03)
def test_operator_mfcc_vs_python():
    """Sweep devices, batch sizes, DCT types, normalization and shapes."""
    cases = [(0, 17, 0.0, (17, 1)),
             (1, 80, 2.0, (513, 100)),
             (1, 90, 0.0, (513, 100)),
             (1, 20, 202.0, (513, 100))]
    for device in ['cpu', 'gpu']:
        for batch_size in [1, 3]:
            for dct_type in [1, 2, 3]:
                # DCT type I does not support ortho-normalization.
                norms = [False] if dct_type == 1 else [True, False]
                for norm in norms:
                    for axis, n_mfcc, lifter, shape in cases:
                        yield check_operator_mfcc_vs_python, device, batch_size, shape, \
                            axis, dct_type, lifter, n_mfcc, norm
def check_operator_mfcc_wrong_args(device, batch_size, input_shape,
                                   axis, dct_type, lifter, n_mfcc, norm, msg):
    """Verify that an invalid MFCC configuration raises RuntimeError matching msg."""
    with assert_raises(RuntimeError, regex=msg):
        data_iter = RandomDataIterator(batch_size, shape=input_shape, dtype=np.float32)
        pipe = MFCCPipeline(device, batch_size, iter(data_iter),
                            axis=axis, dct_type=dct_type, lifter=lifter,
                            n_mfcc=n_mfcc, norm=norm)
        pipe.build()
        pipe.run()
def test_operator_mfcc_wrong_args():
    """Each case pairs an invalid configuration with the expected error regex."""
    batch_size = 3
    for device in ['cpu', 'gpu']:
        for dct_type, norm, axis, n_mfcc, lifter, shape, msg in [
            # DCT-I ortho-normalization is not supported
            (1, True, 0, 20, 0.0, (100, 100),
             "Ortho-normalization is not supported for DCT type I"),
            # axis out of bounds
            (2, False, -1, 20, 0.0, (100, 100),
             "Provided axis cannot be negative"),
            # axis out of bounds
            (2, False, 2, 20, 0.0, (100, 100),
             "Axis [\\d]+ is out of bounds \\[[\\d]+,[\\d]+\\)"),
            # not supported DCT type
            (10, False, 0, 20, 0.0, (100, 100),
             "Unsupported DCT type: 10. Supported types are: 1, 2, 3, 4"),
        ]:
            yield check_operator_mfcc_wrong_args, device, batch_size, shape, \
                axis, dct_type, lifter, n_mfcc, norm, msg
|
DALI-main
|
dali/test/python/operator_1/test_mfcc.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
from test_utils import check_batch, dali_type
from sequences_test_utils import ArgData, ArgDesc, sequence_suite_helper, ArgCb
def make_param(kind, shape):
    """Create an M/T/MT parameter of the requested kind.

    "input"/"scalar input" produce DALI graph nodes, "vector"/"scalar" produce
    numpy/python constants; any other kind yields None (parameter omitted).
    """
    if kind == "input":
        return fn.random.uniform(range=(0, 1), shape=shape)
    if kind == "scalar input":
        return fn.reshape(fn.random.uniform(range=(0, 1)), shape=[])
    if kind == "vector":
        return np.random.rand(*shape).astype(np.float32)
    if kind == "scalar":
        return np.random.rand()
    return None
def clip(value, type=None):
    """Clip value to the representable range of an integer dtype.

    For floating-point dtypes or type=None the value is returned unchanged.
    """
    try:
        info = np.iinfo(type)
    except (AttributeError, ValueError, TypeError):
        # np.iinfo raises ValueError for float dtypes and for None in modern
        # NumPy; the original only caught AttributeError, so clip(x) crashed.
        return value
    return np.clip(value, info.min, info.max)
def make_data_batch(batch_size, in_dim, type):
    """Deterministically generate a batch of 2D point arrays of the given dtype."""
    np.random.seed(1234)
    lo, hi = 0, 1
    if np.issubdtype(type, np.integer):
        info = np.iinfo(type)
        # clip range to +/- 1000000 to prevent excessively large epsilons
        lo = max(info.min / 2, -1000000)
        hi = min(info.max / 2, 1000000)
    batch = []
    for _ in range(batch_size):
        num_points = np.random.randint(0, 10000)
        sample = np.random.rand(num_points, in_dim) * (hi - lo) + lo
        batch.append(sample.astype(type))
    return batch
def get_data_source(batch_size, in_dim, type):
    """Return a zero-argument callable that produces a fresh (deterministic) batch."""
    def source():
        return make_data_batch(batch_size, in_dim, type)
    return source
def _run_test(device, batch_size, out_dim, in_dim, in_dtype, out_dtype, M_kind, T_kind):
    """Run fn.coord_transform with M/T (or fused MT) parameters of various kinds
    and compare the output against a numpy matmul reference."""
    pipe = dali.pipeline.Pipeline(batch_size=batch_size, num_threads=4, device_id=0, seed=1234)
    with pipe:
        X = fn.external_source(source=get_data_source(batch_size, in_dim, in_dtype), device=device,
                               layout="NX")
        M = None
        T = None
        MT = None
        if T_kind == "fused":
            # Fused MT carries the translation as the last column.
            MT = make_param(M_kind, [out_dim, in_dim + 1])
        else:
            M = make_param(M_kind, [out_dim, in_dim])
            T = make_param(T_kind, [out_dim])
        Y = fn.coord_transform(
            X,
            MT=MT.flatten().tolist() if isinstance(MT, np.ndarray) else MT,
            M=M.flatten().tolist() if isinstance(M, np.ndarray) else M,
            T=T.flatten().tolist() if isinstance(T, np.ndarray) else T,
            dtype=dali_type(out_dtype)
        )
        # Output the effective parameters too, so the reference can read them back.
        if M is None:
            M = 1
        if T is None:
            T = 0
        if MT is None:
            MT = 0
        M, T, MT = (
            x if isinstance(x, dali.data_node.DataNode)
            else dali.types.Constant(x, dtype=dali.types.FLOAT)
            for x in (M, T, MT))
        pipe.set_outputs(X, Y, M, T, MT)
    pipe.build()
    # renamed from 'iter' - do not shadow the builtin
    for _ in range(3):
        outputs = pipe.run()
        outputs = [x.as_cpu() if hasattr(x, "as_cpu") else x for x in outputs]
        ref = []
        scale = 1
        for idx in range(batch_size):
            X = outputs[0].at(idx)
            if T_kind == "fused":
                MT = outputs[4].at(idx)
                if MT.size == 1:
                    M = MT
                    T = 0
                else:
                    M = MT[:, :-1]
                    T = MT[:, -1]
            else:
                M = outputs[2].at(idx)
                T = outputs[3].at(idx)
            if M.size == 1:
                Y = X.astype(np.float32) * M + T
            else:
                Y = np.matmul(X.astype(np.float32), M.transpose()) + T
            if np.issubdtype(out_dtype, np.integer):
                info = np.iinfo(out_dtype)
                Y = Y.clip(info.min, info.max)
            ref.append(Y)
            scale = max(scale, np.max(np.abs(Y)) - np.min(np.abs(Y))) if Y.size > 0 else 1
        avg = 1e-6 * scale
        eps = 1e-6 * scale
        if out_dtype != np.float32:  # headroom for rounding
            avg += 0.33
            eps += 0.5
        # NOTE(review): 'avg' is computed but never passed to check_batch (eps is
        # passed twice) - possibly intended as the mean-error bound; verify
        # against test_utils.check_batch's signature before changing.
        check_batch(outputs[1], ref, batch_size, eps, eps, expected_layout="NX")
def test_all():
    """Sweep coord_transform over parameter kinds, dtypes and dimensionalities."""
    # All M/T parameter-kind combinations at fixed 3x3 float32.
    for device in ["cpu", "gpu"]:
        for M_kind in [None, "vector", "scalar", "input", "scalar input"]:
            for T_kind in [None, "vector", "scalar", "input", "scalar input"]:
                for batch_size in [1, 3]:
                    yield _run_test, device, batch_size, 3, 3, np.float32, np.float32, M_kind, \
                        T_kind
    # Input/output dtype combinations.
    for device in ["cpu", "gpu"]:
        for in_dtype in [np.uint8, np.uint16, np.int16, np.int32, np.float32]:
            for out_dtype in set([in_dtype, np.float32]):
                for batch_size in [1, 8]:
                    yield _run_test, device, batch_size, 3, 3, in_dtype, out_dtype, "input", "input"
    # Dimension changes via a separate M matrix (out_dim can differ only when
    # M is an explicit vector/input parameter).
    for device in ["cpu", "gpu"]:
        for M_kind in ["input", "vector", None]:
            for in_dim in [1, 2, 3, 4, 5, 6]:
                if M_kind == "vector" or M_kind == "input":
                    out_dims = [1, 2, 3, 4, 5, 6]
                else:
                    out_dims = [in_dim]
                for out_dim in out_dims:
                    yield _run_test, device, 2, out_dim, in_dim, np.float32, np.float32, M_kind, \
                        "vector"
    # Dimension changes via the fused MT parameter.
    for device in ["cpu", "gpu"]:
        for MT_kind in ["vector", "input", "scalar"]:
            for in_dim in [1, 2, 3, 4, 5, 6]:
                if MT_kind == "vector" or MT_kind == "input":
                    out_dims = [1, 2, 3, 4, 5, 6]
                else:
                    out_dims = [in_dim]
                for out_dim in out_dims:
                    yield _run_test, device, 2, out_dim, in_dim, np.float32, np.float32, MT_kind, \
                        "fused"
def _test_empty_input(device):
    """Check that coord_transform handles empty (0-point) samples and keeps the layout."""
    pipe = dali.pipeline.Pipeline(batch_size=2, num_threads=4, device_id=0, seed=1234)
    with pipe:
        X = fn.external_source(source=[[np.zeros([0, 3]), np.zeros([0, 3])]], device="cpu",
                               layout="AB")
        Y = fn.coord_transform(X, M=(1, 2, 3, 4, 5, 6), T=(1, 2))
    pipe.set_outputs(Y)
    pipe.build()
    o = pipe.run()
    assert o[0].layout() == "AB"
    assert len(o[0]) == 2
    for i in range(len(o[0])):
        # Fixed: the original checked at(0) every iteration, so only the
        # first sample was ever verified.
        assert o[0].at(i).size == 0
def test_empty_input():
    """Run the empty-input check on both backends."""
    yield _test_empty_input, "cpu"
    yield _test_empty_input, "gpu"
def test_sequences():
    """Exercise coord_transform on video-like (per-frame) sequence inputs.

    Uses sequence_suite_helper to check per-frame vs broadcast argument
    semantics for M, T and the fused MT parameter.
    """
    rng = random.Random(42)
    np_rng = np.random.default_rng(12345)
    max_batch_size = 64
    max_num_frames = 50
    num_points = 30
    num_iters = 4
    def points():
        # A random set of 2D points in roughly [-100, 250).
        return np.float32(np_rng.uniform(-100, 250, (num_points, 2)))
    def rand_range(limit):
        # Random-length range with at least 2 elements (up to limit + 1).
        return range(rng.randint(1, limit) + 1)
    def m(sample_desc):
        # Random 2x2 rotation-like matrix with independent scales/angles.
        angles = np_rng.uniform(-np.pi, np.pi, 2)
        scales = np_rng.uniform(0, 5, 2)
        c = np.cos(angles[0])
        s = np.sin(angles[1])
        return np.array([
            [c * scales[0], -s],
            [s, c * scales[1]]], dtype=np.float32)
    def t(sample_desc):
        # Random 2D translation vector.
        return np.float32(np_rng.uniform(-100, 250, 2))
    def mt(sample_desc):
        # Fused [M | T] 2x3 matrix.
        return np.append(m(sample_desc), t(sample_desc).reshape(-1, 1), axis=1)
    # Per-frame (True) vs per-sample (False) argument callbacks.
    input_cases = [
        (fn.coord_transform, {}, [ArgCb("M", m, True)]),
        (fn.coord_transform, {}, [ArgCb("T", t, True)]),
        (fn.coord_transform, {}, [ArgCb("MT", mt, True)]),
        (fn.coord_transform, {}, [ArgCb("MT", mt, False)]),
        (fn.coord_transform, {}, [ArgCb("M", m, True), ArgCb("T", t, True)]),
        (fn.coord_transform, {}, [ArgCb("M", m, False), ArgCb("T", t, True)]),
    ]
    input_seq_data = [[
        np.array([points() for _ in rand_range(max_num_frames)], dtype=np.float32)
        for _ in rand_range(max_batch_size)]
        for _ in range(num_iters)]
    main_input = ArgData(
        desc=ArgDesc(0, "F", "", "F**"),
        data=input_seq_data)
    yield from sequence_suite_helper(rng, [main_input], input_cases, num_iters)
    # Broadcast cases: positional input is a single per-sample tensor.
    input_broadcast_cases = [
        (fn.coord_transform, {}, [ArgCb(0, lambda _: points(), False, "cpu")], ["cpu"]),
        (fn.coord_transform, {}, [ArgCb(0, lambda _: points(), False, "gpu")], ["cpu"]),
    ]
    input_mt_data = [[
        np.array([mt(None) for _ in rand_range(max_num_frames)], dtype=np.float32)
        for _ in rand_range(max_batch_size)]
        for _ in range(num_iters)]
    main_input = ArgData(
        desc=ArgDesc("MT", "F", "", "F**"),
        data=input_mt_data)
    yield from sequence_suite_helper(rng, [main_input], input_broadcast_cases, num_iters)
|
DALI-main
|
dali/test/python/operator_1/test_coord_transform.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nvidia.dali import pipeline_def
from nvidia.dali.backend import TensorGPU
import nvidia.dali.fn as fn
import nvidia.dali.types as types
def get_dali_tensor_gpu(value, shape, dtype, device_id=0) -> TensorGPU:
    """Create a DALI TensorGPU of the given shape and dtype filled with `value`.

    Args:
        value : Value to fill the tensor with.
        shape : Shape for the tensor.
        dtype : Data type for the tensor.
        device_id : GPU to place the constant on.

    Returns:
        TensorGPU: DALI TensorGPU with provided shape and dtype filled
        with provided value.
    """
    @pipeline_def(num_threads=1, batch_size=1)
    def constant_pipeline():
        return types.Constant(value=np.full(shape, value, dtype), device='gpu')

    pipe = constant_pipeline(device_id=device_id)
    pipe.build()
    # Single batch, single sample - return that one tensor.
    return pipe.run()[0][0]
def sequential_pipeline(batch_size, shape):
    """Build a DALI pipeline returning GPU tensors filled with the sample's epoch index.

    Args:
        batch_size: Batch size for the pipeline.
        shape : Shape for the output tensor.
    """
    def fill_with_index(sample_info):
        return np.full(shape, sample_info.idx_in_epoch, dtype=np.int32)

    @pipeline_def(batch_size=batch_size, num_threads=4, device_id=0)
    def pipeline():
        data = fn.external_source(
            source=fill_with_index,
            num_outputs=1,
            batch=False,
            dtype=types.INT32)
        return data[0].gpu()

    return pipeline()
def pipeline_with_variable_shape_output(batch_size):
    """Build a DALI pipeline returning GPU tensors whose width cycles over 5, 3, 2, 4.

    Args:
        batch_size: Batch size for the pipeline.
    """
    widths = (5, 3, 2, 4)

    def numpy_tensors(sample_info):
        idx = sample_info.idx_in_epoch
        # Shape (1, w) with w cycling through `widths`; values are the epoch index.
        return np.full((1, widths[idx % len(widths)]), idx, dtype=np.int32)

    @pipeline_def(batch_size=batch_size, num_threads=4, device_id=0)
    def pipeline():
        data = fn.external_source(
            source=numpy_tensors,
            num_outputs=1,
            batch=False,
            dtype=types.INT32)
        return data[0].gpu()

    return pipeline()
|
DALI-main
|
dali/test/python/jax_plugin/utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
import numpy as np
import nvidia.dali.plugin.jax as dax
from nvidia.dali import pipeline_def
import nvidia.dali.fn as fn
import nvidia.dali.types as types
from nvidia.dali.plugin.jax import DALIGenericIterator
from jax.sharding import PositionalSharding, NamedSharding, PartitionSpec, Mesh
from jax.experimental import mesh_utils
from utils import get_dali_tensor_gpu
import jax.numpy as jnp
def sequential_sharded_pipeline(
        batch_size, shape, device_id, shard_id, shard_size, multiple_outputs=False):
    """Helper to create DALI pipelines that return GPU tensors with sequential values
    and are iterating over virtual sharded dataset.

    For example setting shard_id for 2 and shard size for 8 will result in pipeline
    that starts its iteration from the sample with value 16 since this is third
    shard (shard_id=2) and the shard size is 8.

    Args:
        batch_size: Batch size for the pipeline.
        shape : Shape of the output tensor.
        device_id : Id of the device that pipeline will run on.
        shard_id : Id of the shard for the pipeline.
        shard_size : Size of the shard for the pipeline.
        multiple_outputs : If True, pipeline will return multiple outputs.
    """
    def create_numpy_sequential_tensors_callback():
        # Each sample's value is its epoch index offset by the shard start.
        shard_offset = shard_size * shard_id
        def numpy_sequential_tensors(sample_info):
            return np.full(shape, sample_info.idx_in_epoch + shard_offset, dtype=np.int32)
        return numpy_sequential_tensors
    @pipeline_def(batch_size=batch_size, num_threads=4, device_id=device_id)
    def sequential_pipeline_def():
        data = fn.external_source(
            source=create_numpy_sequential_tensors_callback(),
            num_outputs=1,
            batch=False,
            dtype=types.INT32)
        data = data[0].gpu()
        if not multiple_outputs:
            return data
        # Extra outputs are cheap derivations of the first one.
        return data, data + 0.25, data + 0.5
    return sequential_pipeline_def()
def test_dali_sequential_sharded_tensors_to_jax_sharded_array_manuall():
    """Manually assemble a sharded JAX array from two per-GPU DALI pipelines
    and verify values and device placement over many iterations.

    NOTE(review): "manuall" in the name looks like a typo for "manually";
    kept as-is to avoid changing the public test name.
    """
    assert jax.device_count() > 1, "Multigpu test requires more than one GPU"
    batch_size = 4
    shape = (1, 5)
    # given
    pipe_0 = sequential_sharded_pipeline(
        batch_size=batch_size, shape=shape, device_id=0, shard_id=0, shard_size=batch_size)
    pipe_0.build()
    pipe_1 = sequential_sharded_pipeline(
        batch_size=batch_size, shape=shape, device_id=1, shard_id=1, shard_size=batch_size)
    pipe_1.build()
    for batch_id in range(100):
        dali_tensor_gpu_0 = pipe_0.run()[0].as_tensor()
        dali_tensor_gpu_1 = pipe_1.run()[0].as_tensor()
        jax_shard_0 = dax.integration._to_jax_array(dali_tensor_gpu_0)
        jax_shard_1 = dax.integration._to_jax_array(dali_tensor_gpu_1)
        assert jax_shard_0.device() == jax.devices()[0]
        assert jax_shard_1.device() == jax.devices()[1]
        # when
        jax_array = jax.device_put_sharded(
            [jax_shard_0, jax_shard_1],
            [jax_shard_0.device(), jax_shard_1.device()])
        # then
        # Assert that all values are as expected
        # In first iteration, first shard should be:
        # [[0 0 0 0 0]
        # [1 1 1 1 1]
        # [2 2 2 2 2]
        # [3 3 3 3 3]]
        # And second shard should be:
        # [[4 4 4 4 4]
        # [5 5 5 5 5]
        # [6 6 6 6 6]
        # [7 7 7 7 7]]
        # Then, in second iteration first shard should be:
        # [[4 4 4 4 4]
        # [5 5 5 5 5]
        # [6 6 6 6 6]
        # [7 7 7 7 7]]
        # And second shard should be:
        # [[ 8 8 8 8 8]
        # [ 9 9 9 9 9]
        # [10 10 10 10 10]
        # [11 11 11 11 11]]
        assert jax.numpy.array_equal(
            jax_array.device_buffers[0],
            jax.numpy.stack([
                jax.numpy.full(shape[1:], value, np.int32)
                for value in range(batch_id*batch_size, (batch_id+1)*batch_size)]))
        assert jax.numpy.array_equal(
            jax_array.device_buffers[1],
            jax.numpy.stack([
                jax.numpy.full(shape[1:], value, np.int32)
                for value in range((batch_id+1)*batch_size, (batch_id+2)*batch_size)]))
        # Assert correct backing devices for shards
        assert jax_array.device_buffers[0].device() == jax_shard_0.device()
        assert jax_array.device_buffers[1].device() == jax_shard_1.device()
def test_dali_sequential_sharded_tensors_to_jax_sharded_array_iterator_multiple_outputs():
    """Same sharded setup as the manual test, but driven through
    DALIGenericIterator with three outputs per pipeline."""
    assert jax.device_count() > 1, "Multigpu test requires more than one GPU"
    batch_size = 4
    shape = (1, 5)
    # given
    pipe_0 = sequential_sharded_pipeline(
        batch_size=batch_size,
        shape=shape,
        device_id=0,
        shard_id=0,
        shard_size=batch_size,
        multiple_outputs=True)
    pipe_1 = sequential_sharded_pipeline(
        batch_size=batch_size,
        shape=shape,
        device_id=1,
        shard_id=1,
        shard_size=batch_size,
        multiple_outputs=True)
    output_names = ['data_0', 'data_1', 'data_2']
    # when
    dali_iterator = DALIGenericIterator([pipe_0, pipe_1], output_names, size=batch_size*10)
    for batch_id, batch in enumerate(dali_iterator):
        # then
        # check values for all outputs
        # for the data_0 values should be the same as in the single output example
        # for data_1 values are the same + 0.25, for data_2 the same + 0.5
        for output_id, output_name in enumerate(output_names):
            jax_array = batch[output_name]
            assert jax.numpy.array_equal(
                jax_array.device_buffers[0],
                jax.numpy.stack([
                    jax.numpy.full(shape[1:], value + output_id * 0.25, np.float32)
                    for value in range(batch_id*batch_size, (batch_id+1)*batch_size)]))
            assert jax.numpy.array_equal(
                jax_array.device_buffers[1],
                jax.numpy.stack([
                    jax.numpy.full(shape[1:], value + output_id * 0.25, np.float32)
                    for value in range((batch_id+1)*batch_size, (batch_id+2)*batch_size)]))
            # Assert correct backing devices for shards
            assert jax_array.device_buffers[0].device() == jax.devices()[0]
            assert jax_array.device_buffers[1].device() == jax.devices()[1]
    # Assert correct number of batches returned from the iterator
    assert batch_id == 4
def run_sharding_test(sharding):
    """Assemble a 2-element sharded array from per-GPU DALI constants and
    compare it against a natively sharded jnp.arange(2)."""
    # given
    dali_shard_0 = get_dali_tensor_gpu(0, (1), np.int32, 0)
    dali_shard_1 = get_dali_tensor_gpu(1, (1), np.int32, 1)
    shards = [dax.integration._to_jax_array(dali_shard_0),
              dax.integration._to_jax_array(dali_shard_1)]
    assert shards[0].device() == jax.devices()[0]
    assert shards[1].device() == jax.devices()[1]
    # when
    dali_sharded_array = jax.make_array_from_single_device_arrays(
        shape=(2,), sharding=sharding, arrays=shards)
    # then
    jax_sharded_array = jax.device_put(jnp.arange(2), sharding)
    assert (dali_sharded_array == jax_sharded_array).all()
    assert len(dali_sharded_array.device_buffers) == jax.device_count()
    assert dali_sharded_array.device_buffers[0].device() == jax.devices()[0]
    assert dali_sharded_array.device_buffers[1].device() == jax.devices()[1]
def run_sharding_iterator_test(sharding):
    """Drive two sharded multi-output pipelines through DALIGenericIterator with a
    sharding spec and verify the globally assembled arrays and device placement."""
    assert jax.device_count() > 1, "Multigpu test requires more than one GPU"
    batch_size = 4
    shape = (1, 5)
    # given
    pipe_0 = sequential_sharded_pipeline(
        batch_size=batch_size,
        shape=shape,
        device_id=0,
        shard_id=0,
        shard_size=batch_size,
        multiple_outputs=True)
    pipe_1 = sequential_sharded_pipeline(
        batch_size=batch_size,
        shape=shape,
        device_id=1,
        shard_id=1,
        shard_size=batch_size,
        multiple_outputs=True)
    output_names = ['data_0', 'data_1', 'data_2']
    # when
    dali_iterator = DALIGenericIterator(
        [pipe_0, pipe_1], output_names, size=batch_size*10, sharding=sharding)
    for batch_id, batch in enumerate(dali_iterator):
        # then
        # check values for all outputs
        # for the data_0 values should be the same as in the single output example
        # for data_1 values are the same + 0.25, for data_2 the same + 0.5
        for output_id, output_name in enumerate(output_names):
            jax_array = batch[output_name]
            # The global array spans both shards, hence the 2*batch_size range.
            assert jax.numpy.array_equal(
                jax_array,
                jax.numpy.stack([
                    jax.numpy.full(shape[1:], value + output_id * 0.25, np.float32)
                    for value in range(batch_id*batch_size, (batch_id+2)*batch_size)]))
            # Assert correct backing devices for shards
            assert jax_array.device_buffers[0].device() == jax.devices()[0]
            assert jax_array.device_buffers[1].device() == jax.devices()[1]
    # Assert correct number of batches returned from the iterator
    assert batch_id == 4
def test_positional_sharding_workflow():
    """Run the basic sharding check with a PositionalSharding over all devices."""
    run_sharding_test(PositionalSharding(jax.devices()))
def test_named_sharding_workflow():
    """Run the sharding smoke test with a NamedSharding over a 'device' axis."""
    # axis_names=('device',): explicit one-element tuple.  The original
    # ('device') is just the string 'device' — Mesh happens to accept both,
    # but the tuple makes the intent unambiguous.
    mesh = Mesh(jax.devices(), axis_names=('device',))
    sharding = NamedSharding(mesh, PartitionSpec('device'))
    run_sharding_test(sharding)
def test_positional_sharding_workflow_with_iterator():
    """Run the iterator sharding test with PositionalSharding over an (N, 1) mesh."""
    device_mesh = mesh_utils.create_device_mesh((jax.device_count(), 1))
    run_sharding_iterator_test(PositionalSharding(device_mesh))
def test_named_sharding_workflow_with_iterator():
    """Run the iterator sharding test with a NamedSharding over a 'batch' axis."""
    batch_mesh = Mesh(jax.devices(), axis_names=('batch'))
    run_sharding_iterator_test(NamedSharding(batch_mesh, PartitionSpec('batch')))
|
DALI-main
|
dali/test/python/jax_plugin/test_multigpu.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
import jax.numpy as jnp
import numpy as np
import logging as log
from test_integration import get_dali_tensor_gpu
from jax.sharding import PositionalSharding, NamedSharding, PartitionSpec, Mesh
import nvidia.dali.plugin.jax as dax
def print_devices(process_id):
    """Log the global/local device counts and per-device details."""
    num_local = jax.local_device_count()
    num_global = jax.device_count()
    log.info(f"Local devices = {num_local}, "
             f"global devices = {num_global}")
    log.info("All devices: ")
    print_devices_details(jax.devices(), process_id)
    log.info("Local devices:")
    print_devices_details(jax.local_devices(), process_id)
def print_devices_details(devices_list, process_id):
    """Log id, host, owning process and kind for every device in devices_list."""
    for dev in devices_list:
        log.info(f"Id = {dev.id}, host_id = {dev.host_id}, "
                 f"process_id = {dev.process_index}, kind = {dev.device_kind}")
def test_lax_workflow(process_id):
    """Check a DALI-produced array on the local device works with jax.lax collectives."""
    array_from_dali = dax.integration._to_jax_array(get_dali_tensor_gpu(1, (1), np.int32))
    assert array_from_dali.device() == jax.local_devices()[0], \
        "Array should be backed by the device local to current process."
    # Each participating device contributes [1], so the psum equals device count.
    sum_across_devices = jax.pmap(lambda x: jax.lax.psum(x, 'i'), axis_name='i')(array_from_dali)
    assert sum_across_devices[0] == len(jax.devices()), \
        "Sum across devices should be equal to the number of devices as data per device = [1]"
    log.info("Passed lax workflow test")
def run_distributed_sharing_test(sharding, process_id):
    """Build a distributed sharded array from one local DALI shard and validate it.

    Each process contributes a single one-element shard whose value equals its
    `process_id`; `sharding` describes the 2-device layout of the global array.
    """
    dali_local_shard = dax.integration._to_jax_array(
        get_dali_tensor_gpu(process_id, (1), np.int32, 0))
    # Note: we pass only one local shard but the array virtually
    # combines all shards together
    dali_sharded_array = jax.make_array_from_single_device_arrays(
        shape=(2,), sharding=sharding, arrays=[dali_local_shard])
    # This array should be backed only by one device buffer that holds
    # local part of the data. This buffer should be on the local device.
    assert len(dali_sharded_array.device_buffers) == 1
    assert dali_sharded_array.device_buffer == jnp.array([process_id])
    assert dali_sharded_array.device_buffer.device() == jax.local_devices()[0]
    assert dali_sharded_array.device_buffer.device() == jax.devices()[process_id]
def test_positional_sharding_workflow(process_id):
    """Exercise the distributed sharing test with a PositionalSharding."""
    run_distributed_sharing_test(
        sharding=PositionalSharding(jax.devices()), process_id=process_id)
    log.info("Passed positional sharding workflow test")
def test_named_sharding_workflow(process_id):
    """Exercise the distributed sharing test with a NamedSharding."""
    # axis_names=('device',): explicit one-element tuple.  The original
    # ('device') is just the string 'device' — Mesh happens to accept both,
    # but the tuple makes the intent unambiguous.
    mesh = Mesh(jax.devices(), axis_names=('device',))
    sharding = NamedSharding(mesh, PartitionSpec('device'))
    run_distributed_sharing_test(sharding=sharding, process_id=process_id)
    log.info("Passed named sharding workflow test")
def run_multiprocess_workflow(process_id=0):
    """Initialize jax.distributed for a 2-process run and execute the multiprocess tests.

    Args:
        process_id: rank of this process; 0 acts as the coordinator.
    """
    jax.distributed.initialize(
        coordinator_address="localhost:12321",
        num_processes=2,
        process_id=process_id)
    # Prefix every log line with the process id to disambiguate interleaved output.
    log.basicConfig(
        level=log.INFO,
        format=f"PID {process_id}: %(message)s")
    print_devices(process_id=process_id)
    test_lax_workflow(process_id=process_id)
    test_positional_sharding_workflow(process_id=process_id)
    test_named_sharding_workflow(process_id=process_id)
# Entry point for the coordinator process (process_id=0); a companion script
# starts the same workflow with process_id=1 (see jax_client.py in SOURCE).
if __name__ == "__main__":
    run_multiprocess_workflow(process_id=0)
|
DALI-main
|
dali/test/python/jax_plugin/jax_server.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import jax
import jax.numpy
import jax.dlpack
import nvidia.dali.plugin.jax as dax
from nose2.tools import cartesian_params
from utils import get_dali_tensor_gpu, sequential_pipeline
@cartesian_params(
    (np.float32, np.int32),  # dtypes to test
    ([], [1], [10], [2, 4], [1, 2, 3]),  # shapes to test
    (1, -99))  # values to test
def test_dali_tensor_gpu_to_jax_array(dtype, shape, value):
    """A DALI GPU tensor converted via _to_jax_array keeps value, shape, dtype and device."""
    # given
    dali_tensor_gpu = get_dali_tensor_gpu(
        value=value, shape=shape, dtype=dtype)
    # when
    jax_array = dax.integration._to_jax_array(dali_tensor_gpu)
    # then
    assert jax.numpy.array_equal(
        jax_array,
        jax.numpy.full(shape, value, dtype))
    # Make sure JAX array is backed by the GPU
    assert jax_array.device() == jax.devices()[0]
def test_dali_sequential_tensors_to_jax_array():
    """Batches from a sequential DALI pipeline convert to JAX arrays with expected values."""
    batch_size = 4
    shape = (1, 5)
    pipe = sequential_pipeline(batch_size, shape)
    pipe.build()
    for batch_id in range(100):
        # given
        dali_tensor_gpu = pipe.run()[0].as_tensor()
        # when
        jax_array = dax.integration._to_jax_array(dali_tensor_gpu)
        # then
        assert jax_array.device() == jax.devices()[0]
        for i in range(batch_size):
            # Sample values increase sequentially across batches.
            assert jax.numpy.array_equal(
                jax_array[i],
                jax.numpy.full(
                    shape[1:],  # TODO(awolant): Explain/fix shape consistency
                    batch_id * batch_size + i,
                    np.int32))
|
DALI-main
|
dali/test/python/jax_plugin/test_integration.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import jax
import jax.numpy
import jax.dlpack
from utils import sequential_pipeline
import nvidia.dali.plugin.jax as dax
def test_dali_sequential_iterator_to_jax_array():
    """DALIGenericIterator yields JAX arrays with sequential values on the GPU."""
    batch_size = 4
    shape = (1, 5)
    pipe = sequential_pipeline(batch_size, shape)
    # Renamed from `iter` so the Python builtin is not shadowed.
    iterator = dax.DALIGenericIterator([pipe], ['data'], size=batch_size*100)
    for batch_id, data in enumerate(iterator):
        # given
        jax_array = data['data']
        # then
        assert jax_array.device() == jax.devices()[0]
        for i in range(batch_size):
            assert jax.numpy.array_equal(
                jax_array[i],
                jax.numpy.full(
                    shape[1:],  # TODO(awolant): Explain shape consistency
                    batch_id * batch_size + i,
                    np.int32))
    # size=400 samples / batch of 4 -> 100 iterations, last batch_id == 99.
    assert batch_id == 99
|
DALI-main
|
dali/test/python/jax_plugin/test_iterator.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax.numpy as jnp
from nvidia.dali.plugin.jax.clu import DALIGenericPeekableIterator as DALIIterator
from test_integration import sequential_pipeline
from clu.data.dataset_iterator import ArraySpec
from nose_utils import raises
import time
from utils import pipeline_with_variable_shape_output
# Common parameters for all tests in this file
batch_size = 3
shape = (1, 5)
# Shape of one iterator batch: batch dimension plus the per-sample extents.
batch_shape = (batch_size, *shape[1:])
def test_jax_peekable_iterator_peek():
    """peek() must return the same batch the subsequent next() yields."""
    pipe = sequential_pipeline(batch_size, shape)
    iterator = DALIIterator([pipe], ['data'], size=batch_size*100)
    assert iterator.element_spec == {'data': ArraySpec(dtype=jnp.int32, shape=batch_shape)}
    for _ in range(5):
        peeked = iterator.peek()
        advanced = iterator.next()
        assert jnp.array_equal(advanced['data'], peeked['data'])
def test_jax_peekable_iterator_peek_async_result_before_next():
    """peek_async() resolved *before* next() must match the next() output."""
    pipe = sequential_pipeline(batch_size, shape)
    iterator = DALIIterator([pipe], ['data'], size=batch_size*100)
    assert iterator.element_spec == {'data': ArraySpec(dtype=jnp.int32, shape=batch_shape)}
    for _ in range(5):
        future = iterator.peek_async()
        peeked_output = future.result()
        output = iterator.next()
        assert jnp.array_equal(
            output['data'], peeked_output['data']), \
            f"output: {output['data']}, peeked_output: {peeked_output['data']}"
def test_jax_peekable_iterator_peek_async_result_after_next():
    '''This test is not deterministic, but it should pass most of the time.'''
    # given
    pipe = sequential_pipeline(batch_size, shape)
    # when
    iterator = DALIIterator([pipe], ['data'], size=batch_size*100)
    # then
    assert iterator.element_spec == {'data': ArraySpec(dtype=jnp.int32, shape=batch_shape)}
    for i in range(5):
        peeked_output = iterator.peek_async()
        time.sleep(0.1)  # wait before calling next to give time for peek to start
        output = iterator.next()
        # Resolving the future after next() must still yield the same batch.
        peeked_output = peeked_output.result()
        assert jnp.array_equal(
            output['data'], peeked_output['data']), \
            f"output: {output['data']}, peeked_output: {peeked_output['data']}"
@raises(ValueError, glob="The shape or type of the output changed between iterations.")
def test_jax_peekable_iterator_with_variable_shapes_pipeline():
    """The iterator must reject pipelines whose output shape changes between iterations."""
    # given
    batch_size = 1
    pipe = pipeline_with_variable_shape_output(batch_size)
    iterator = DALIIterator([pipe], ['data'], size=batch_size*100)
    iterator.next()
    # when
    # The second batch comes with a different shape — expected to raise ValueError.
    iterator.next()
|
DALI-main
|
dali/test/python/jax_plugin/test_peekable_iterator.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jax_server import run_multiprocess_workflow

# Entry point for the second (non-coordinator) process of the two-process
# distributed test; jax_server.py runs the same workflow with process_id=0.
if __name__ == "__main__":
    run_multiprocess_workflow(process_id=1)
|
DALI-main
|
dali/test/python/jax_plugin/jax_client.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from nose2.tools import params
from nvidia.dali import fn, pipeline_def, types
from nvidia.dali.auto_aug.core import augmentation, select
from test_utils import get_dali_extra_path, check_batch
# Location of the JPEG test images inside the DALI_extra assets checkout.
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
def sample_info(cb):
    """Per-sample external source that maps DALI's SampleInfo through cb to int32."""

    def to_int32_array(info):
        return np.array(cb(info), dtype=np.int32)

    return fn.external_source(to_int32_array, batch=False)
# Scales pixel values by `multiplier` (drawn from mag_range) and casts the
# result back to the input image's dtype.
@augmentation(mag_range=((1.1, 3)))
def overexpose(image, multiplier):
    return fn.cast_like(image * multiplier, image)
# Blends the image with its Laplacian edge map; the mix ratio `blend_factor`
# comes from mag_range.
@augmentation(mag_range=(0.1, 0.9))
def blend_edges(image, blend_factor):
    edges = fn.laplacian(image, window_size=5, dtype=types.UINT8)
    return fn.cast_like((1. - blend_factor) * image + blend_factor * edges, image)
def as_square_shape(edge_len):
    """Return the [height, width] extents of a square with the given edge length."""
    return [edge_len] * 2
# Erases a square region (relative size given by the magnitude via
# as_square_shape) anchored at the top-left corner, filled with a constant.
@augmentation(mag_range=(0.1, 0.7), mag_to_param=as_square_shape)
def cutout(image, shape):
    return fn.erase(image, shape=shape, anchor=[0, 0], normalized=True, fill_value=120)
@params(
    ("cpu", ),
    ("gpu", ),
)
def test_select(dev):
    """select() applied per-sample must match running each augmentation directly."""

    def _collect_batch(p):
        # Build and run `p`, returning its outputs as tuples of numpy samples.
        p.build()
        batches = p.run()
        if dev == "gpu":
            batches = (batch.as_cpu() for batch in batches)
        return tuple([np.array(sample) for sample in batch] for batch in batches)

    ops = [overexpose, blend_edges, cutout]
    num_magnitude_bins = 4
    batch_size = num_magnitude_bins * len(ops)

    @pipeline_def(enable_conditionals=True, batch_size=batch_size, num_threads=4, device_id=0)
    def pipeline_select():
        # Per-sample op choice and magnitude bin derived from the index in batch.
        op_idx = sample_info(lambda info: info.idx_in_batch % len(ops))
        magnitude_bin = sample_info(lambda info: info.idx_in_batch % num_magnitude_bins)
        image, _ = fn.readers.file(name="Reader", file_root=images_dir)
        image = fn.decoders.image(image, device="cpu" if dev == "cpu" else "mixed")
        return select(ops, op_idx, image, magnitude_bin=magnitude_bin, num_magnitude_bins=4)

    @pipeline_def(batch_size=batch_size, num_threads=4, device_id=0)
    def pipeline_refs():
        # Reference: run every op on every sample; outputs are matched up below.
        magnitude_bin = sample_info(lambda info: info.idx_in_batch % num_magnitude_bins)
        image, _ = fn.readers.file(name="Reader", file_root=images_dir)
        image = fn.decoders.image(image, device="cpu" if dev == "cpu" else "mixed")
        return tuple(op(image, magnitude_bin=magnitude_bin, num_magnitude_bins=4) for op in ops)

    batch_select, = _collect_batch(pipeline_select())
    ref_batches = _collect_batch(pipeline_refs())
    # Sample idx selected op `idx % len(ops)` — pick the matching reference output.
    ref_batch = [ref_batches[idx % len(ops)][idx] for idx in range(batch_size)]
    check_batch(batch_select, ref_batch, max_allowed_error=1e-6)
|
DALI-main
|
dali/test/python/auto_aug/test_select.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali.tensors as _tensors
from nvidia.dali import pipeline_def, fn, types
from nvidia.dali.auto_aug.core import augmentation, signed_bin
from nvidia.dali.auto_aug.core._args import forbid_unused_kwargs
from nose2.tools import params
from test_utils import check_batch
from nose_utils import assert_warns, assert_raises
def sample_info(cb):
    """Batch=False external source returning cb(sample_info) as an int32 array."""

    def as_int32(info):
        return np.array(cb(info), dtype=np.int32)

    return fn.external_source(as_int32, batch=False)
def ref_param(mag_range, mag_range_num_elements, bins_batch, mag_signs_batch=None,
              mag_to_param=None):
    """Reference magnitude-bin -> parameter mapping used to validate augmentations.

    A (lo, hi) `mag_range` tuple is expanded into `mag_range_num_elements`
    linearly spaced magnitudes; an array `mag_range` is indexed directly.
    Optional per-sample sign flips and a `mag_to_param` transform (default:
    `np.array`) are applied before stacking the batch into one array.
    """
    if isinstance(mag_range, tuple):
        assert len(mag_range) == 2
        mag_range = np.linspace(mag_range[0], mag_range[1], mag_range_num_elements)
    magnitudes = [mag_range[bin_idx] for bin_idx in bins_batch]
    if mag_signs_batch is not None:
        assert len(mag_signs_batch) == len(magnitudes)
        magnitudes = [mag * (-1) ** negate for mag, negate in zip(magnitudes, mag_signs_batch)]
    transform = np.array if mag_to_param is None else mag_to_param
    return np.array([transform(mag) for mag in magnitudes])
def test_magnitude_is_none():
    """Without mag_range the augmentation must receive param=None."""

    @augmentation
    def pass_through_sample(data, param):
        assert param is None, "If the `mag_range` is not specified the param should be None"
        return data

    data = types.Constant(42)
    # The augmentation returns its input unchanged — identity is preserved.
    assert pass_through_sample(data, magnitude_bin=42) is data
def test_lo_hi_mag_range():
    """(lo, hi) mag_range must be split linearly into num_magnitude_bins magnitudes."""
    mag_range = (100, 110)
    batch_size = 11
    const_bin = 2

    @augmentation(mag_range=mag_range)
    def pass_through_mag(data, param):
        return param

    @pipeline_def(num_threads=4, device_id=0, batch_size=batch_size, seed=42)
    def pipeline():
        # Constant bin vs per-sample (DataNode) bin are both exercised.
        idx_in_batch = sample_info(lambda info: info.idx_in_batch)
        const_mag = pass_through_mag(types.Constant(42), magnitude_bin=const_bin,
                                     num_magnitude_bins=5)
        dyn_mag = pass_through_mag(types.Constant(42), magnitude_bin=idx_in_batch,
                                   num_magnitude_bins=11)
        return const_mag, dyn_mag

    p = pipeline()
    p.build()
    const_mag, dyn_mag = p.run()
    const_mag_ref = ref_param(mag_range, 5, [const_bin] * batch_size)
    dyn_mag_ref = ref_param(mag_range, 11, list(range(batch_size)))
    check_batch(const_mag, const_mag_ref, max_allowed_error=0)
    check_batch(dyn_mag, dyn_mag_ref, max_allowed_error=0)
def test_explicit_mag_range():
    """An explicit array mag_range must be indexed directly by the magnitude bin."""
    mag_range = np.array([1, 1, 2, 3, 5, 8, 13, 21])
    batch_size = 8
    const_bin = 7

    @augmentation(mag_range=mag_range)
    def pass_through_mag(data, param):
        return param

    @pipeline_def(num_threads=4, device_id=0, batch_size=batch_size, seed=42)
    def pipeline():
        # Constant bin vs per-sample (DataNode) bin are both exercised.
        idx_in_batch = sample_info(lambda info: info.idx_in_batch)
        const_mag = pass_through_mag(types.Constant(42), magnitude_bin=const_bin)
        dyn_mag = pass_through_mag(types.Constant(42), magnitude_bin=idx_in_batch)
        return const_mag, dyn_mag

    p = pipeline()
    p.build()
    const_mag, dyn_mag = p.run()
    const_mag_ref = ref_param(mag_range, None, [const_bin] * batch_size)
    dyn_mag_ref = ref_param(mag_range, None, list(range(batch_size)))
    check_batch(const_mag, const_mag_ref, max_allowed_error=0)
    check_batch(dyn_mag, dyn_mag_ref, max_allowed_error=0)
@params((((201, 260), 60, False, 0)), (((301, 330), 30, True, 29)), (((101, 150), 50, False, None)),
        (((701, 710), 10, True, None)))
def test_randomly_negate(mag_range, num_magnitude_bins, use_implicit_sign, const_mag):
    """randomly_negate must flip magnitude signs (explicit signed_bin or implicit random sign)."""
    batch_size = 64

    @augmentation(mag_range=mag_range, randomly_negate=True)
    def pass_through_mag(data, param):
        return param

    @pipeline_def(num_threads=4, device_id=0, batch_size=batch_size, seed=42)
    def pipeline():
        magnitude_bin = const_mag if const_mag is not None else sample_info(
            lambda info: info.idx_in_batch % num_magnitude_bins)
        if not use_implicit_sign:
            # Deterministic alternating sign so the reference can be computed exactly.
            magnitude_sign = sample_info(lambda info: info.idx_in_batch % 2)
            magnitude_bin = signed_bin(magnitude_bin, magnitude_sign)
        return pass_through_mag(types.Constant(42), magnitude_bin=magnitude_bin,
                                num_magnitude_bins=num_magnitude_bins)

    if not use_implicit_sign:
        p = pipeline()
    else:
        # Passing an unsigned bin to a randomly-negating augmentation warns.
        warn_glob = "but unsigned `magnitude_bin` was passed to the augmentation call"
        with assert_warns(Warning, glob=warn_glob):
            p = pipeline()
    p.build()
    magnitudes, = p.run()
    magnitudes = [np.array(el) for el in magnitudes]
    if use_implicit_sign:
        # the implicit sign is random, just sanity check if
        # there are some positive and negative magnitudes
        assert any(el < 0 for el in magnitudes)
        assert any(el > 0 for el in magnitudes)
        magnitudes = [np.abs(el) for el in magnitudes]
    mag_sign = None if use_implicit_sign else [i % 2 for i in range(batch_size)]
    magnitude_bin = [const_mag] * batch_size if const_mag is not None else [
        i % num_magnitude_bins for i in range(batch_size)
    ]
    ref_magnitudes = ref_param(mag_range, num_magnitude_bins, magnitude_bin,
                               mag_signs_batch=mag_sign)
    check_batch(magnitudes, ref_magnitudes, max_allowed_error=0)
@params((4, ), (None, ))
def test_no_randomly_negate(const_mag):
    """Augmentations declared without randomly_negate must ignore the sign of signed_bin."""
    mag_range = (0, 10)
    num_magnitude_bins = 11
    batch_size = 32

    @augmentation(mag_range=mag_range)
    def pass_through_mag(data, param):
        return param

    @pipeline_def(num_threads=4, device_id=0, batch_size=batch_size, seed=42)
    def pipeline():
        magnitude_bin = const_mag if const_mag is not None else sample_info(
            lambda info: info.idx_in_batch % num_magnitude_bins)
        # make sure that the augmentation declared without `randomly_negate` ignores the signed_bin
        return pass_through_mag(types.Constant(42), magnitude_bin=signed_bin(magnitude_bin),
                                num_magnitude_bins=num_magnitude_bins)

    p = pipeline()
    p.build()
    magnitudes, = p.run()
    magnitude_bin = [const_mag] * batch_size if const_mag is not None else [
        i % num_magnitude_bins for i in range(batch_size)
    ]
    # Reference has no sign flips — all magnitudes stay positive.
    ref_magnitudes = ref_param(mag_range, 11, magnitude_bin)
    check_batch(magnitudes, ref_magnitudes, max_allowed_error=0)
@params((((201, 211), 11, 7, np.uint16, "cpu")), (((101, 107), 7, None, np.float32, "gpu")))
def test_mag_to_param(mag_range, num_magnitude_bins, const_mag, dtype, param_device):
    """mag_to_param output must land on param_device with dtype and values preserved."""
    batch_size = 31

    def mag_to_param(magnitude):
        return np.array([magnitude, magnitude + 2, 42], dtype=dtype)

    @augmentation(mag_range=mag_range, randomly_negate=True, mag_to_param=mag_to_param,
                  param_device=param_device)
    def pass_through_mag(data, param):
        return param

    @pipeline_def(num_threads=4, device_id=0, batch_size=batch_size, seed=42)
    def pipeline():
        # Deterministic alternating sign to make the reference computable.
        mag_sign = sample_info(lambda info: info.idx_in_batch % 2)
        magnitude_bin = const_mag if const_mag is not None else sample_info(
            lambda info: info.idx_in_batch % num_magnitude_bins)
        return pass_through_mag(types.Constant(42),
                                magnitude_bin=signed_bin(magnitude_bin, mag_sign),
                                num_magnitude_bins=num_magnitude_bins)

    p = pipeline()
    p.build()
    magnitudes, = p.run()
    # param_device decides whether the params come back as CPU or GPU batch.
    if param_device == "cpu":
        assert isinstance(magnitudes, _tensors.TensorListCPU)
    else:
        assert isinstance(magnitudes, _tensors.TensorListGPU)
        magnitudes = magnitudes.as_cpu()
    magnitudes = [np.array(el) for el in magnitudes]
    mag_sign = [i % 2 for i in range(batch_size)]
    magnitude_bin = [const_mag] * batch_size if const_mag is not None else [
        i % num_magnitude_bins for i in range(batch_size)
    ]
    ref_magnitudes = ref_param(mag_range, num_magnitude_bins, magnitude_bin,
                               mag_signs_batch=mag_sign, mag_to_param=mag_to_param)
    assert np.array(magnitudes).dtype == np.array(ref_magnitudes).dtype
    check_batch(magnitudes, ref_magnitudes, max_allowed_error=0)
def test_augmentation_setup_update():
    """aug.augmentation(attr=None) must reset that attr to its default and keep the rest."""

    def dummy_mag_to_param(magnitude):
        return magnitude + 1

    initial = {
        "mag_range": (0, 10),
        "randomly_negate": True,
        "mag_to_param": dummy_mag_to_param,
        "param_device": "gpu",
        "name": "some_other_dummy_name",
    }

    @augmentation
    def default_aug(data, _):
        return data

    # Defaults taken from a plainly decorated augmentation; the default name
    # follows the wrapped function, so align it with `dummy` below.
    defaults = {attr: getattr(default_aug, attr) for attr in initial}
    defaults["name"] = "dummy"

    @augmentation(**initial)
    def dummy(data, _):
        return data

    for reset_attr in initial:
        # Reset exactly one attribute; all other attributes must stay `initial`.
        reset_attr_aug = dummy.augmentation(**{reset_attr: None})
        for attr in initial:
            reset = getattr(reset_attr_aug, attr)
            ref = (defaults if attr == reset_attr else initial)[attr]
            assert reset == ref, f"{attr}: {reset}, {ref} ({reset_attr})"
def test_augmentation_nested_decorator_fail():
    """Applying @augmentation to an already decorated Augmentation must raise."""

    @augmentation
    def dummy(data, _):
        return data

    expected_glob = "The `@augmentation` was applied to already decorated Augmentation."
    with assert_raises(Exception, glob=expected_glob):
        augmentation(dummy, mag_range=(5, 10))
def test_mag_to_param_data_node_fail():
    """mag_to_param returning a DALI op result (DataNode) must be rejected."""

    def shear(magnitude):
        # Returns a DataNode, not an np.ndarray — deliberately illegal.
        return fn.transforms.shear(shear=magnitude)

    @augmentation(mag_range=(0, 250), mag_to_param=shear)
    def illegal_shear(data, shear_mt):
        return fn.warp_affine(data, mt=shear_mt)

    @pipeline_def(num_threads=4, device_id=0, batch_size=8, seed=42)
    def pipeline():
        data = types.Constant(np.full((100, 100, 3), 42, dtype=np.uint8))
        return illegal_shear(data, magnitude_bin=5, num_magnitude_bins=10)

    glob_msg = "callback must return parameter that is `np.ndarray` or"
    with assert_raises(Exception, glob=glob_msg):
        pipeline()
@params((True, False), (False, True))
def test_mag_to_param_non_uniform_fail(non_uniform_shape, non_uniform_type):
    """mag_to_param must produce arrays of one shape and dtype across all bins."""
    shape_lo = (2, )
    shape_hi = (3, )

    def mag_to_param(magnitude):
        # Shape and/or dtype changes at a magnitude threshold — deliberately illegal.
        shape = shape_lo if not non_uniform_shape or magnitude < 5 else shape_hi
        dtype = np.uint8 if not non_uniform_type or magnitude < 3 else np.uint16
        return np.full(shape, 42, dtype=dtype)

    @augmentation(mag_range=(0, 10), mag_to_param=mag_to_param)
    def pass_param(data, param):
        return param

    @pipeline_def(num_threads=4, device_id=0, batch_size=8, seed=42)
    def pipeline():
        data = types.Constant(np.full((100, 100, 3), 42, dtype=np.uint8))
        mag_bin = sample_info(lambda si: si.idx_in_batch)
        return pass_param(data, magnitude_bin=mag_bin, num_magnitude_bins=11)

    glob_msg = (f"augmentation must return the arrays of the same type and shape *"
                f"has shape {shape_hi if non_uniform_shape else shape_lo} and type "
                f"{'uint16' if non_uniform_type else 'uint8'}.")
    with assert_raises(Exception, glob=glob_msg):
        pipeline()
def test_lack_of_positional_args_fail():
    """Functions with fewer than two positional params must be rejected by @augmentation."""

    def no_args():
        pass

    def one_arg(arg):
        pass

    def one_kwarg_only(arg, *, kwarg_only):
        pass

    def kwarg_only(*, kwarg1, kwarg2):
        pass

    cases = ((no_args, 0), (kwarg_only, 0), (one_arg, 1), (one_kwarg_only, 1))
    for fun, num_positional in cases:
        msg = f"accepts {num_positional} positional argument(s), but the functions decorated"
        with assert_raises(Exception, glob=msg):
            augmentation(fun)
def test_no_required_kwargs():
    """Extra positional params of an augmentation must be supplied as call kwargs."""

    @augmentation
    def aug(data, param, extra, another_extra, extra_with_default=None):
        pass

    @pipeline_def(batch_size=3, num_threads=4, device_id=0, seed=42)
    def pipeline(aug, aug_kwargs):
        return aug(types.Constant(42), **aug_kwargs)

    # All required extras supplied — both calls succeed.
    pipeline(aug, {'extra': None, 'another_extra': 42, 'extra_with_default': 7})
    pipeline(aug, {'extra': None, 'another_extra': 42})
    # Missing required extras must be reported by name.
    with assert_raises(Exception, glob="not provided to the call: another_extra"):
        pipeline(aug, {'extra': None})
    with assert_raises(Exception, glob="not provided to the call: extra"):
        pipeline(aug, {'another_extra': 42})
    with assert_raises(Exception, glob="not provided to the call: extra, another_extra"):
        pipeline(aug, {})
def test_unused_kwargs():
    """forbid_unused_kwargs must reject kwargs no augmentation in the set accepts."""

    @augmentation
    def no_extra(data, _):
        pass

    @augmentation
    def aug(data, param, one_param, another_param):
        pass

    @augmentation
    def another_aug(data, _, another_param, yet_another_param):
        pass

    augments = (no_extra, aug, another_aug)
    # Every kwarg is consumed by some augmentation -> no error.
    forbid_unused_kwargs(augments, {}, 'dummy')
    forbid_unused_kwargs(augments, {
        'one_param': 1,
        'another_param': 2,
        'yet_another_param': 3
    }, 'dummy')
    # Misspelled kwargs are not accepted by any augmentation -> error.
    with assert_raises(Exception, glob="The kwarg `amnother_param` is not used"):
        forbid_unused_kwargs(augments, {
            'one_param': 1,
            'amnother_param': 2,
            'yet_another_param': 3
        }, 'dummy')
    with assert_raises(Exception, glob="The kwargs `amnother_param, yemt_another_param` are"):
        forbid_unused_kwargs(augments, {
            'one_param': 1,
            'amnother_param': 2,
            'yemt_another_param': 3
        }, 'dummy')
|
DALI-main
|
dali/test/python/auto_aug/test_augmentation_decorator.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import random
import unittest
import numpy as np
from scipy.stats import chisquare
from nose2.tools import params
from nvidia.dali import fn, tensors, types
from nvidia.dali import pipeline_def
from nvidia.dali.auto_aug import rand_augment
from nvidia.dali.auto_aug.core import augmentation
from test_utils import get_dali_extra_path, check_batch
from nose_utils import assert_raises
# Paths to DALI_extra test assets: JPEG images and the Sintel test video.
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
vid_dir = os.path.join(data_root, "db", "video", "sintel", "video_files")
vid_files = ["sintel_trailer-720p_3.mp4"]
vid_filenames = [os.path.join(vid_dir, vid_file) for vid_file in vid_files]
def debug_discrepancy_helper(*batch_pairs):
    """
    Accepts list of triples: left_batch, right_batch, name of a batch.
    Prepares a list of statistics for any differences between samples in the corresponding batches.
    """

    def as_array_list(batch):
        # GPU batches must be copied to the host before converting to numpy.
        if isinstance(batch, tensors.TensorListGPU):
            batch = batch.as_cpu()
        return [np.array(sample) for sample in batch]

    batch_names = [name for _, _, name in batch_pairs]
    batch_pairs = [(as_array_list(left), as_array_list(right)) for left, right, _ in batch_pairs]
    batch_stats = []
    for batch_name, batch_pair in zip(batch_names, batch_pairs):
        left, right = batch_pair
        num_samples = len(left), len(right)
        sample_diffs = []
        for sample_idx, (sample_left, sample_right) in enumerate(zip(left, right)):
            if sample_left.shape != sample_right.shape:
                sample_diffs.append({
                    "sample_idx": sample_idx,
                    "sample_left_shape": sample_left.shape,
                    "sample_right_shape": sample_right.shape,
                })
            else:
                # max - min avoids underflow that plain subtraction would
                # produce for unsigned sample dtypes.
                absdiff = np.maximum(sample_right, sample_left) - np.minimum(
                    sample_right, sample_left)
                err = np.mean(absdiff)
                max_err = np.max(absdiff)
                min_err = np.min(absdiff)
                total_errors = np.sum(absdiff != 0)
                # Bug fix: the original tuple listed `max_err` twice and never
                # consulted the computed `min_err`.
                if any(val != 0 for val in (err, max_err, min_err, total_errors)):
                    sample_diffs.append({
                        "sample_idx": sample_idx,
                        "mean_error": err,
                        "max_error": max_err,
                        "min_err": min_err,
                        "total_errors": total_errors,
                        "shape": sample_left.shape
                    })
        batch_stats.append({
            "batch_name": batch_name,
            "num_samples": num_samples,
            "sample_diffs": sample_diffs
        })
    return batch_stats
@params(*tuple(
    enumerate(
        itertools.product(("cpu", "gpu"), (True, False), (True, False), (None, 0), (True, False)))))
def test_run_rand_aug(i, args):
    """Smoke-test rand_augment on images; two identically-seeded pipelines must agree."""
    dev, uniformly_resized, use_shape, fill_value, specify_translation_bounds = args
    # Keep batch_sizes ns and ms length co-prime
    batch_sizes = [1, 8, 7, 13, 31, 64, 47]
    ns = [1, 2, 3]
    ms = [0, 12, 15, 30]
    batch_size = batch_sizes[i % len(batch_sizes)]
    n = ns[i % len(ns)]
    m = ms[i % len(ms)]

    @pipeline_def(enable_conditionals=True, batch_size=batch_size, num_threads=4, device_id=0,
                  seed=43)
    def pipeline():
        encoded_image, _ = fn.readers.file(name="Reader", file_root=images_dir)
        decoded_image = fn.decoders.image(encoded_image, device="cpu" if dev == "cpu" else "mixed")
        resized_image = decoded_image if not uniformly_resized else fn.resize(
            decoded_image, size=(244, 244))
        extra = {} if not use_shape else {"shape": fn.peek_image_shape(encoded_image)}
        if fill_value is not None:
            extra["fill_value"] = fill_value
        if specify_translation_bounds:
            if use_shape:
                extra["max_translate_rel"] = 0.9
            else:
                extra["max_translate_abs"] = 400
        raugmented_image = rand_augment.rand_augment(resized_image, n=n, m=m, **extra)
        return encoded_image, decoded_image, resized_image, raugmented_image

    # run the pipeline twice to make sure instantiation preserves determinism
    p1 = pipeline()
    p1.build()
    p2 = pipeline()
    p2.build()
    for iteration_idx in range(3):
        encoded1, decoded1, resized1, out1 = p1.run()
        encoded2, decoded2, resized2, out2 = p2.run()
        try:
            check_batch(out1, out2)
        except AssertionError as e:
            # On mismatch, report which processing stage diverged first.
            diffs = debug_discrepancy_helper(
                (encoded1, encoded2, "encoded"),
                (decoded1, decoded2, "decoded"),
                (resized1, resized2, "resized"),
                (out1, out2, "out"),
            )
            iter_diff = {"iteration_idx": iteration_idx, "diffs": diffs}
            raise AssertionError(
                f"The outputs do not match, the differences between encoded, decoded, "
                f"resized and augmented batches are respectively: {repr(iter_diff)}") from e
class VideoTest(unittest.TestCase):
    """rand_augment applied to video sequences ("FHWC" layout) must be deterministic."""

    @classmethod
    def setUpClass(cls):
        """Decode two differently-sized sets of video sequences once for all tests."""
        num_frames = 31
        roi_start = (90, 0)
        roi_end = (630, 1280)
        size_1 = (223, 367)
        size_2 = (400, 100)

        @pipeline_def(batch_size=6, device_id=0, num_threads=4, seed=42)
        def pipeline(size):
            video = fn.readers.video_resize(
                filenames=vid_filenames,
                sequence_length=num_frames,
                roi_start=roi_start,
                roi_end=roi_end,
                resize_x=size[1],
                resize_y=size[0],
                file_list_include_preceding_frame=True,
                device='gpu',
            )
            return video

        # Collect host copies of the decoded sequences for use as test inputs.
        cls.vid_files = []
        for size in (size_1, size_2):
            p = pipeline(size=size)
            p.build()
            out, = p.run()
            cls.vid_files.extend(np.array(sample) for sample in out.as_cpu())

    @params(*tuple(
        enumerate((
            ("cpu", 4, False, 2, 8, True),
            ("cpu", 2, True, 2, 10, False),
            ("gpu", 7, False, 3, 5, True),
            ("gpu", 1, True, 1, 7, True),
        ))))
    def test_uniform(self, i, args):
        """Two identically-seeded pipelines must produce identical augmented videos."""
        device, batch_size, use_shape, n, m, monotonic_mag = args
        num_iterations = 3
        assert device in ("gpu", "cpu")

        @pipeline_def(batch_size=batch_size, device_id=0, num_threads=4, seed=42,
                      enable_conditionals=True)
        def pipeline():
            # Per-pipeline RNG seeded by the case index keeps input selection
            # reproducible between the two pipeline instances.
            rng = random.Random(42 + i)
            video = fn.external_source(
                source=lambda: list(rng.choices(self.vid_files, k=batch_size)), batch=True,
                layout="FHWC")
            extra = {} if not use_shape else {"shape": fn.shapes(video)[1:]}
            extra["monotonic_mag"] = monotonic_mag
            if device == "gpu":
                video = video.gpu()
            video = rand_augment.rand_augment(video, n=n, m=m, **extra)
            return video

        # run the pipeline twice to make sure instantiation preserves determinism
        p1 = pipeline()
        p1.build()
        p2 = pipeline()
        p2.build()
        for _ in range(num_iterations):
            out1, = p1.run()
            out2, = p2.run()
            check_batch(out1, out2)
@params(*tuple(enumerate(itertools.product(
    ['cpu', 'gpu'],
    [True, False],
    [1, 2, 3],
    [2, 3],
))))
def test_ops_selection_and_mags(case_idx, args):
    """Statistically checks that rand_augment selects op sequences and magnitude
    signs uniformly, by making every op append its (op_id, magnitude) pair."""
    dev, use_sign, n, num_ops = args
    num_magnitude_bins = 9
    # the chisquare expects at least 5 elements in a bin and we can have around
    # (num_ops * (2**use_sign)) ** n ops
    batch_size = 2048
    magnitude_cases = list(range(num_magnitude_bins))
    m = magnitude_cases[case_idx % len(magnitude_cases)]

    def mag_to_param_with_op_id(op_id):
        def mag_to_param(magnitude):
            return np.array([op_id, magnitude], dtype=np.int32)

        return mag_to_param

    # the op concatenates its (op_id, magnitude) tag to the running sample
    @augmentation(param_device=dev)
    def op(data, op_id_mag_id):
        return fn.cat(data, op_id_mag_id)

    # distinct, non-overlapping mag ranges so (op_id, magnitude) identifies the op;
    # when use_sign is set, every third op is randomly negated
    augmentations = [
        op.augmentation(mag_range=(10 * i + 1, 10 * i + num_magnitude_bins),
                        mag_to_param=mag_to_param_with_op_id(i + 1), randomly_negate=use_sign
                        and i % 3 == 0) for i in range(num_ops)
    ]
    # enumerate every possible (op sequence, sign sequence) outcome and its probability
    expected_counts = {}
    seq_prob = 1. / (num_ops**n)
    for aug_sequence in itertools.product(*([augmentations] * n)):
        possible_signs = [(-1, 1) if aug.randomly_negate else (1, ) for aug in aug_sequence]
        possible_signs = tuple(itertools.product(*possible_signs))
        prob = seq_prob / len(possible_signs)
        for signs in possible_signs:
            assert len(aug_sequence) == len(signs)
            outs = []
            for aug, sign in zip(aug_sequence, signs):
                mag = aug._get_magnitudes(num_magnitude_bins)[m]
                op_id_mag = aug.mag_to_param(mag * sign)
                outs.append(op_id_mag)
            expected_counts[tuple(el for out in outs for el in out)] = prob
    expected_counts = {output: p * batch_size for output, p in expected_counts.items()}

    @pipeline_def(enable_conditionals=True, batch_size=batch_size, num_threads=4, device_id=0,
                  seed=42)
    def pipeline():
        data = types.Constant([], dtype=types.INT32)
        if dev == "gpu":
            data = data.gpu()
        data = rand_augment.apply_rand_augment(augmentations, data, n=n, m=m,
                                               num_magnitude_bins=num_magnitude_bins)
        return fn.reshape(data, shape=(-1, 2))

    p = pipeline()
    p.build()
    for i in range(3):
        output, = p.run()
        output = [np.array(s) for s in (output.as_cpu() if dev == "gpu" else output)]
        actual_count = {allowed_out: 0 for allowed_out in expected_counts}
        for sample in output:
            assert len(sample) == n, f"{i} {sample}"
            out = tuple(el for op_mag in sample for el in op_mag)
            actual_count[out] += 1
        actual = []
        expected = []
        for out in expected_counts:
            actual.append(actual_count[out])
            expected.append(expected_counts[out])
        # two-sided check: the counts should neither deviate from the expected
        # distribution nor fit it suspiciously well
        stat = chisquare(actual, expected)
        assert 0.01 <= stat.pvalue <= 0.99, f"{stat} {actual} {expected}"
def test_wrong_params_fail():
    """Checks that invalid rand_augment arguments raise descriptive errors.

    NOTE(review): the `[[]`/`[]]` sequences in the globs presumably escape
    literal brackets for the matcher used by `assert_raises` — verify there.
    """

    @pipeline_def(batch_size=4, device_id=0, num_threads=4, seed=42, enable_conditionals=True)
    def pipeline(n, m, num_magnitude_bins):
        data = types.Constant(np.array([[[]]], dtype=np.uint8))
        return rand_augment.rand_augment(data, n=n, m=m, num_magnitude_bins=num_magnitude_bins)

    with assert_raises(Exception,
                       glob="The number of operations to apply `n` must be a non-negative integer"):
        pipeline(n=None, m=1, num_magnitude_bins=11)
    with assert_raises(Exception, glob="The `num_magnitude_bins` must be a positive integer, got"):
        pipeline(n=1, m=1, num_magnitude_bins=None)
    with assert_raises(Exception, glob="`m` must be an integer from `[[]0, 14[]]` range. Got 15."):
        pipeline(n=1, m=15, num_magnitude_bins=15)
    with assert_raises(Exception, glob="The `augmentations` list cannot be empty"):

        @pipeline_def(batch_size=4, device_id=0, num_threads=4, seed=42, enable_conditionals=True)
        def no_aug_pipeline():
            data = types.Constant(np.array([[[]]], dtype=np.uint8))
            return rand_augment.apply_rand_augment([], data, 1, 20)

        no_aug_pipeline()
    with assert_raises(Exception, glob="The augmentation `translate_x` requires `shape` argument"):

        @pipeline_def(batch_size=4, device_id=0, num_threads=4, seed=42, enable_conditionals=True)
        def missing_shape():
            data = types.Constant(np.array([[[]]], dtype=np.uint8))
            augments = rand_augment.get_rand_augment_suite(use_shape=True)
            return rand_augment.apply_rand_augment(augments, data, 1, 20)

        missing_shape()
    with assert_raises(Exception, glob="The kwarg `shhape` is not used by any of the"):

        @pipeline_def(batch_size=4, device_id=0, num_threads=4, seed=42, enable_conditionals=True)
        def unused_kwarg():
            data = types.Constant(np.array([[[]]], dtype=np.uint8))
            augments = rand_augment.get_rand_augment_suite(use_shape=True)
            return rand_augment.apply_rand_augment(augments, data, 1, 20, shhape=42)

        unused_kwarg()
|
DALI-main
|
dali/test/python/auto_aug/test_rand_augment.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
from nose2.tools import params, cartesian_params
import nvidia.dali.tensors as _tensors
from nvidia.dali import fn, pipeline_def
from nvidia.dali.auto_aug import augmentations as a
from nvidia.dali.auto_aug.core._utils import get_translations as _get_translations
from test_utils import get_dali_extra_path, check_batch
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
vid_file = os.path.join(data_root, 'db', 'video', 'sintel', 'sintel_trailer-720p.mp4')
def maybe_squeeze(img, axis=2):
    """Drop the `axis` extent of `img` if it is singular; otherwise pass through."""
    if img.shape[axis] == 1:
        return np.squeeze(img, axis=axis)
    return img
def maybe_expand(img, axis=2):
    """Append a channel extent to an array of rank `axis`; otherwise pass through."""
    if len(img.shape) == axis:
        return np.expand_dims(img, axis=axis)
    return img
def pil_baseline(pil_op):
    """Wrap a PIL operation into a numpy-in / numpy-out baseline callable."""

    def inner(sample, param=None):
        pil_img = Image.fromarray(maybe_squeeze(sample))
        if param is None:
            result = pil_op(pil_img)
        else:
            result = pil_op(pil_img, param)
        return maybe_expand(np.array(result))

    return inner
default_batch_size = 16
def compare_against_baseline(dali_aug, baseline_op, get_data, batch_size=default_batch_size,
                             dev="gpu", eps=1e-7, max_allowed_error=1e-6, params=None,
                             post_proc=None, use_shape=False, modality="image"):
    """Runs `dali_aug` over one batch and compares it against `baseline_op`.

    Every sample gets a different magnitude bin (bin == index in the batch);
    `params`, if given, provides the matching per-sample baseline parameter.
    For `modality == "video"` the baseline is applied frame by frame.
    """

    @pipeline_def(batch_size=batch_size, num_threads=4, device_id=0, seed=42)
    def pipeline():
        data = get_data()
        op_data = data if dev != "gpu" else data.gpu()
        # one magnitude bin per sample: bin index equals index in batch
        mag_bin = fn.external_source(lambda info: np.array(info.idx_in_batch, dtype=np.int32),
                                     batch=False)
        extra = {}
        if use_shape:
            shape = fn.shapes(data)
            # for videos drop the leading frame-count extent of the shape
            extra["shape"] = shape[int(modality == "video"):]
        output = dali_aug(op_data, num_magnitude_bins=batch_size, magnitude_bin=mag_bin, **extra)
        return output, data

    p = pipeline()
    p.build()
    output, data, = p.run()
    if dev == "gpu":
        output = output.as_cpu()
    output = [np.array(sample) for sample in output]
    if isinstance(data, _tensors.TensorListGPU):
        data = data.as_cpu()
    data = [np.array(sample) for sample in data]

    if modality == "image":
        def apply_to_sample(f, sample, *params):
            return f(sample, *params)
    else:
        # videos: run the per-frame op over each frame and re-stack the sequence
        def apply_to_sample(f, vid, *params):
            return np.stack([f(frame, *params) for frame in vid])

    if params is None:
        ref_output = [apply_to_sample(baseline_op, sample) for sample in data]
    else:
        assert len(params) == len(data)
        ref_output = [
            apply_to_sample(baseline_op, sample, param) for sample, param in zip(data, params)
        ]
    # optional post-processing applied to both outputs before the comparison
    if post_proc is not None:
        output = [apply_to_sample(post_proc, sample) for sample in output]
        ref_output = [apply_to_sample(post_proc, sample) for sample in ref_output]
    check_batch(output, ref_output, eps=eps, max_allowed_error=max_allowed_error)
def get_images():
    """DALI graph snippet: read the test jpegs and decode them on the CPU."""
    encoded, _ = fn.readers.file(name="Reader", file_root=images_dir)
    return fn.decoders.image(encoded, device="cpu")
def get_videos():
    """Builds 4 synthetic "videos" (stacks of equally-resized images, different
    size per video) and returns a callable usable as a DALI graph snippet."""
    batch_size = 64
    num_vids = 4
    step = batch_size // num_vids  # frames per video

    def get_size(sample_info):
        # frames of one video share a size; sizes differ between videos
        size = (sample_info.idx_in_batch // step) * 25 + 200
        return np.array([size, size + 7], dtype=np.float32)

    @pipeline_def(batch_size=64, num_threads=4, device_id=0)
    def pipeline():
        image, _ = fn.readers.file(name="Reader", file_root=images_dir)
        size = fn.external_source(source=get_size, batch=False)
        image = fn.decoders.image(image, device="cpu")
        return fn.resize(image, size=size)

    p = pipeline()
    p.build()
    out, = p.run()
    out = [np.array(sample) for sample in out]
    # group each run of `step` consecutive frames into one FHWC sequence
    vids = [np.stack([out[i * step + j] for j in range(step)]) for i in range(num_vids)]

    def inner():
        return fn.external_source(
            source=lambda source_info: vids[source_info.idx_in_batch % len(vids)], batch=False,
            layout="FHWC")

    return inner
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_shear_x(modality, dev):
    # Reference adapted from DeepLearningExamples:
    # https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/
    # Classification/ConvNets/image_classification/autoaugment.py
    def shear_x_ref(img, magnitude):
        coeffs = (1, -magnitude, 0, 0, 1, 0)
        return img.transform(img.size, Image.AFFINE, coeffs, Image.BILINEAR,
                             fillcolor=(128, ) * 3)

    source = get_videos() if modality == "video" else get_images
    aug = a.shear_x.augmentation(mag_range=(-0.3, 0.3), randomly_negate=False)
    mags = aug._get_magnitudes(default_batch_size)
    compare_against_baseline(aug, pil_baseline(shear_x_ref), source, dev=dev, params=mags,
                             max_allowed_error=None, eps=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_shear_y(modality, dev):
    # Reference adapted from DeepLearningExamples:
    # https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/
    # Classification/ConvNets/image_classification/autoaugment.py
    def shear_y_ref(img, magnitude):
        coeffs = (1, 0, 0, -magnitude, 1, 0)
        return img.transform(img.size, Image.AFFINE, coeffs, Image.BILINEAR,
                             fillcolor=(128, ) * 3)

    source = get_videos() if modality == "video" else get_images
    aug = a.shear_y.augmentation(mag_range=(-0.3, 0.3), randomly_negate=False)
    mags = aug._get_magnitudes(default_batch_size)
    compare_against_baseline(aug, pil_baseline(shear_y_ref), source, dev=dev, params=mags,
                             max_allowed_error=None, eps=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_translate_x_no_shape(modality, dev):
    # Reference adapted from DeepLearningExamples:
    # https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/
    # Classification/ConvNets/image_classification/autoaugment.py
    def translate_x_ref(img, magnitude):
        coeffs = (1, 0, -magnitude, 0, 1, 0)
        return img.transform(img.size, Image.AFFINE, coeffs, Image.BILINEAR,
                             fillcolor=(128, ) * 3)

    source = get_videos() if modality == "video" else get_images
    aug = a.translate_x_no_shape.augmentation(mag_range=(-250, 250), randomly_negate=False)
    mags = aug._get_magnitudes(default_batch_size)
    compare_against_baseline(aug, pil_baseline(translate_x_ref), source, dev=dev, params=mags,
                             max_allowed_error=None, eps=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_translate_x(modality, dev):
    # Reference adapted from DeepLearningExamples:
    # https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/
    # Classification/ConvNets/image_classification/autoaugment.py
    def translate_x_ref(img, magnitude):
        coeffs = (1, 0, -magnitude * img.width, 0, 1, 0)
        return img.transform(img.size, Image.AFFINE, coeffs, Image.BILINEAR,
                             fillcolor=(128, ) * 3)

    source = get_videos() if modality == "video" else get_images
    aug = a.translate_x.augmentation(mag_range=(-1, 1), randomly_negate=False)
    mags = aug._get_magnitudes(default_batch_size)
    compare_against_baseline(aug, pil_baseline(translate_x_ref), source, dev=dev, params=mags,
                             use_shape=True, max_allowed_error=None, eps=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_translate_y_no_shape(modality, dev):
    # Reference adapted from DeepLearningExamples:
    # https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/
    # Classification/ConvNets/image_classification/autoaugment.py
    def translate_y_ref(img, magnitude):
        coeffs = (1, 0, 0, 0, 1, -magnitude)
        return img.transform(img.size, Image.AFFINE, coeffs, Image.BILINEAR,
                             fillcolor=(128, ) * 3)

    source = get_videos() if modality == "video" else get_images
    aug = a.translate_y_no_shape.augmentation(mag_range=(-250, 250), randomly_negate=False)
    mags = aug._get_magnitudes(default_batch_size)
    compare_against_baseline(aug, pil_baseline(translate_y_ref), source, dev=dev, params=mags,
                             max_allowed_error=None, eps=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_translate_y(modality, dev):
    # Reference adapted from DeepLearningExamples:
    # https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/
    # Classification/ConvNets/image_classification/autoaugment.py
    def translate_y_ref(img, magnitude):
        coeffs = (1, 0, 0, 0, 1, -magnitude * img.height)
        return img.transform(img.size, Image.AFFINE, coeffs, Image.BILINEAR,
                             fillcolor=(128, ) * 3)

    source = get_videos() if modality == "video" else get_images
    aug = a.translate_y.augmentation(mag_range=(-1, 1), randomly_negate=False)
    mags = aug._get_magnitudes(default_batch_size)
    compare_against_baseline(aug, pil_baseline(translate_y_ref), source, dev=dev, params=mags,
                             use_shape=True, max_allowed_error=None, eps=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_rotate(modality, dev):
    # Reference adapted from DeepLearningExamples:
    # https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/
    # Classification/ConvNets/image_classification/autoaugment.py
    def rotate_with_fill(img, magnitude):
        rot = img.convert("RGBA").rotate(magnitude, resample=Image.BILINEAR)
        background = Image.new("RGBA", img.size, (128, ) * 3)
        return Image.composite(rot, background, rot).convert(img.mode)

    source = get_videos() if modality == "video" else get_images
    aug = a.rotate.augmentation(mag_range=(-30, 30), randomly_negate=False)
    mags = aug._get_magnitudes(default_batch_size)
    compare_against_baseline(aug, pil_baseline(rotate_with_fill), source, dev=dev, params=mags,
                             max_allowed_error=None, eps=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_brightness(modality, dev):
    def brightness_ref(img, magnitude):
        return ImageEnhance.Brightness(img).enhance(magnitude)

    source = get_videos() if modality == "video" else get_images
    aug = a.brightness.augmentation(mag_range=(0.1, 1.9), randomly_negate=False,
                                    mag_to_param=None)
    mags = aug._get_magnitudes(default_batch_size)
    compare_against_baseline(aug, pil_baseline(brightness_ref), source, dev=dev, params=mags,
                             max_allowed_error=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_contrast(modality, dev):
    def contrast_ref(img, magnitude):
        return ImageEnhance.Contrast(img).enhance(magnitude)

    source = get_videos() if modality == "video" else get_images
    aug = a.contrast.augmentation(mag_range=(0.1, 1.9), randomly_negate=False,
                                  mag_to_param=None)
    mags = aug._get_magnitudes(default_batch_size)
    compare_against_baseline(aug, pil_baseline(contrast_ref), source, dev=dev, params=mags,
                             max_allowed_error=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_color(modality, dev):
    def color_ref(img, magnitude):
        return ImageEnhance.Color(img).enhance(magnitude)

    source = get_videos() if modality == "video" else get_images
    aug = a.color.augmentation(mag_range=(0.1, 1.9), randomly_negate=False, mag_to_param=None)
    mags = aug._get_magnitudes(default_batch_size)
    tolerance = 2
    compare_against_baseline(aug, pil_baseline(color_ref), source, dev=dev, params=mags,
                             max_allowed_error=tolerance, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_sharpness(modality, dev):
    def sharpness_ref(img, magnitude):
        return ImageEnhance.Sharpness(img).enhance(magnitude)

    def post_proc(img):
        # PIL runs the 3x3 convolution in valid mode (the filtered image is one
        # pixel smaller at each end) and pastes it back onto the original; we
        # skip the pasting, so crop the 1px border before comparing.
        return img[1:-1, 1:-1, :]

    source = get_videos() if modality == "video" else get_images
    aug = a.sharpness.augmentation(mag_range=(0.1, 1.9), randomly_negate=False,
                                   mag_to_param=a.sharpness_kernel_shifted)
    mags = aug._get_magnitudes(default_batch_size)
    compare_against_baseline(aug, pil_baseline(sharpness_ref), source, dev=dev, params=mags,
                             max_allowed_error=1, post_proc=post_proc, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_posterize(modality, dev):
    source = get_videos() if modality == "video" else get_images
    # 0 is remapped to 1 as in the tf implementation referred in the RA paper,
    # hence the (1, 8) range
    aug = a.posterize.augmentation(param_device=dev, mag_range=(1, 8))
    mags = np.round(aug._get_magnitudes(default_batch_size)).astype(np.int32)
    compare_against_baseline(aug, pil_baseline(ImageOps.posterize), source, dev=dev,
                             params=mags, max_allowed_error=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_solarize(modality, dev):
    source = get_videos() if modality == "video" else get_images
    aug = a.solarize.augmentation(param_device=dev)
    params = aug._map_mags_to_params(aug._get_magnitudes(default_batch_size))
    compare_against_baseline(aug, pil_baseline(ImageOps.solarize), source, dev=dev,
                             params=params, max_allowed_error=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_solarize_add(modality, dev):
    # Reference adapted from DeepLearningExamples:
    # https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/
    # Classification/ConvNets/image_classification/autoaugment.py
    def solarize_add_ref(image, magnitude):
        threshold = 128
        # below the threshold, shift by `magnitude` clamped to [0, 255];
        # at or above it, keep the value intact
        lut = [
            min(max(i + magnitude, 0), 255) if i < threshold else i for i in range(256)
        ]
        return ImageOps._lut(image, lut)

    source = get_videos() if modality == "video" else get_images
    aug = a.solarize_add.augmentation(param_device=dev)
    params = aug._map_mags_to_params(aug._get_magnitudes(default_batch_size))
    compare_against_baseline(aug, pil_baseline(solarize_add_ref), source, dev=dev,
                             params=params, max_allowed_error=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_invert(modality, dev):
    source = get_videos() if modality == "video" else get_images
    compare_against_baseline(a.invert, pil_baseline(ImageOps.invert), source, dev=dev,
                             max_allowed_error=1, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_equalize(modality, dev):
    # PIL turns the histogram cumulative sum into a lookup table with a slightly
    # different formula than open-cv, so point-wise diffs can be significant
    # while the average stays small — hence eps instead of max_allowed_error.
    source = get_videos() if modality == "video" else get_images
    compare_against_baseline(a.equalize, pil_baseline(ImageOps.equalize), source, dev=dev,
                             max_allowed_error=None, eps=7, modality=modality)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_auto_contrast(modality, dev):
    source = get_videos() if modality == "video" else get_images
    compare_against_baseline(a.auto_contrast, pil_baseline(ImageOps.autocontrast), source,
                             dev=dev, max_allowed_error=1, modality=modality)
# Check edge cases (single-value channels)
@cartesian_params(("image", "video"), ("cpu", "gpu"))
def test_auto_contrast_mono_channels(modality, dev):
    """Checks auto_contrast on degenerate inputs: constant channels (globally
    and per-channel) and a mix of random and constant channels."""

    def modal_shape(shape, num_frames=15):
        # prepend the frames extent for the video modality
        if modality != "video":
            return shape
        return (num_frames, ) + shape

    rng = np.random.default_rng(seed=42)
    const_single_channel = np.full(modal_shape((101, 205, 1)), 0, dtype=np.uint8)
    const_multi_channel = np.full(modal_shape((200, 512, 3)), 255, dtype=np.uint8)
    const_multi_per_channel = np.stack([
        np.full(modal_shape((300, 300), 7), 254, dtype=np.uint8),
        np.full(modal_shape((300, 300), 7), 159, dtype=np.uint8),
        np.full(modal_shape((300, 300), 7), 1, dtype=np.uint8)
    ], axis=-1)
    rnd_uniform_and_fixed_channel = np.stack([
        np.uint8(rng.uniform(50, 160, modal_shape((400, 400), 32))),
        np.full(modal_shape((400, 400), 32), 159, dtype=np.uint8),
        np.uint8(rng.uniform(0, 255, modal_shape((400, 400), 32))),
    ], axis=-1)
    imgs = [
        const_single_channel, const_multi_channel, const_multi_per_channel,
        rnd_uniform_and_fixed_channel
    ]

    def get_batch():
        layout = "HWC" if modality != "video" else "FHWC"
        return fn.external_source(lambda: imgs, batch=True, layout=layout)

    compare_against_baseline(a.auto_contrast, pil_baseline(ImageOps.autocontrast), get_batch,
                             batch_size=len(imgs), max_allowed_error=1, dev=dev, modality=modality)
@params(*tuple(itertools.product((True, False), (0, 1), ('height', 'width', 'both', 'none'))))
def test_translation_helper(use_shape, offset_fraction, extent):
    # make sure the translation helper processes the args properly
    # note, it only uses translate_y (as it is in imagenet policy)
    default_abs = 123
    default_rel = 0.123
    height, width = 300, 700
    shape = [height, width]
    params = {}
    assert extent in ('height', 'width', 'both', 'none'), f"{extent}"
    # build the max_translate_{rel,abs} kwarg covering the requested extent(s)
    if extent != 'none':
        if use_shape:
            param_shape = [1., 1.]
            param_name = "max_translate_rel"
        else:
            param_shape = shape
            param_name = "max_translate_abs"
        if extent == 'both':
            param = [param_shape[0] * offset_fraction, param_shape[1] * offset_fraction]
        elif extent == 'height':
            param = [param_shape[0] * offset_fraction, 0]
        else:
            assert extent == 'width'
            param = [0, param_shape[1] * offset_fraction]
        params[param_name] = param
    translate_x, translate_y = _get_translations(use_shape, default_abs, default_rel, **params)
    # the shape-aware variants must be selected iff use_shape is set
    if use_shape:
        assert translate_x.op is a.translate_x.op
        assert translate_y.op is a.translate_y.op
    else:
        assert translate_x.op is a.translate_x_no_shape.op
        assert translate_y.op is a.translate_y_no_shape.op
    mag_ranges = [translate_x.mag_range, translate_y.mag_range]
    # expected upper bounds of the resulting magnitude ranges
    if extent == "none":
        expected_height = default_rel if use_shape else default_abs
        expected_width = expected_height
    elif use_shape:
        expected_height = offset_fraction
        expected_width = offset_fraction
    else:
        expected_height = height * offset_fraction
        expected_width = width * offset_fraction
    if extent == "both":
        assert translate_x.mag_range == (0, expected_width), f"{mag_ranges} {expected_width}"
        assert translate_y.mag_range == (0, expected_height), f"{mag_ranges} {expected_height}"
    elif extent == "height":
        assert translate_x.mag_range == (0, 0), f"{mag_ranges}"
        assert translate_y.mag_range == (0, expected_height), f"{mag_ranges} {expected_height}"
    elif extent == "width":
        assert translate_x.mag_range == (0, expected_width), f"{mag_ranges} {expected_width}"
        assert translate_y.mag_range == (0, 0), f"{mag_ranges}"
    else:
        assert extent == "none"
        assert translate_x.mag_range == (0, expected_width), f"{mag_ranges}"
        assert translate_y.mag_range == (0, expected_height), f"{mag_ranges}"
|
DALI-main
|
dali/test/python/auto_aug/test_augmentations.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import unittest
import random
import numpy as np
from scipy.stats import chisquare
from nose2.tools import params
from nvidia.dali import fn, types
from nvidia.dali import pipeline_def
from nvidia.dali.auto_aug import auto_augment, augmentations as a
from nvidia.dali.auto_aug.core import augmentation, Policy
from test_utils import get_dali_extra_path, check_batch
from nose_utils import assert_raises, assert_warns
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
vid_dir = os.path.join(data_root, "db", "video", "sintel", "video_files")
vid_files = ["sintel_trailer-720p_2.mp4"]
vid_filenames = [os.path.join(vid_dir, vid_file) for vid_file in vid_files]
def mag_to_param_with_op_id(op_id):
    """Builds a mag_to_param callback that tags every magnitude with `op_id`."""

    def mag_to_param(magnitude):
        return np.array((op_id, magnitude), dtype=np.int32)

    return mag_to_param
@pipeline_def(enable_conditionals=True, num_threads=4, device_id=0, seed=44)
def concat_aug_pipeline(dev, policy):
    """Pipeline applying `policy` to empty int32 samples; intended for policies
    whose augmentations append an (op_id, magnitude) pair to the sample, so the
    (-1, 2)-reshaped output lists the applied ops in order."""
    data = types.Constant(np.array([], dtype=np.int32), device=dev)
    if dev == "gpu":
        data = data.gpu()
    data = auto_augment.apply_auto_augment(policy, data)
    return fn.reshape(data, shape=(-1, 2))
def collect_sub_policy_outputs(sub_policies, num_magnitude_bins):
    """For each sub-policy, lists the params its augmentations produce at the
    magnitude bins fixed in the sub-policy."""
    return [[
        aug._map_mag_to_param(aug._get_magnitudes(num_magnitude_bins)[mag_bin])
        for aug, _, mag_bin in sub_policy
    ] for sub_policy in sub_policies]
# Predefined policies exercised with the shape-aware parameter combinations.
run_aug_shape = ("image_net", "reduced_cifar10", "svhn")
# NOTE: this is a generator expression — it can be consumed only once (it is,
# by the @params decoration of test_run_auto_aug below).
run_aug_shape_supporting_cases = (
    # reduce the number of test cases by running three predefine shape-aware policies in turns,
    ((run_aug_shape[i % 3], ) + params) for i, params in enumerate(
        itertools.product(
            ("cpu", "gpu"),
            (True, False),
            (True, False),
            (None, 0),
            (True, False),
        )))
# Cases for the `reduced_image_net` policy, run without the `shape` argument
# and without specifying translation bounds.
run_aug_no_translation_cases = itertools.product(
    ("reduced_image_net", ),
    ("cpu", "gpu"),
    (True, False),
    (False, ),
    (None, 0),
    (False, ),
)
@params(*tuple(
    enumerate(itertools.chain(run_aug_shape_supporting_cases, run_aug_no_translation_cases))))
def test_run_auto_aug(i, args):
    """Smoke + determinism test: two identically-seeded pipelines running the
    named predefined policy must produce identical batches."""
    policy_name, dev, uniformly_resized, use_shape, fill_value, specify_translation_bounds = args
    batch_sizes = [1, 8, 7, 64, 13, 41]
    batch_size = batch_sizes[i % len(batch_sizes)]

    @pipeline_def(enable_conditionals=True, batch_size=batch_size, num_threads=4, device_id=0,
                  seed=43)
    def pipeline():
        encoded_image, _ = fn.readers.file(name="Reader", file_root=images_dir)
        image = fn.decoders.image(encoded_image, device="cpu" if dev == "cpu" else "mixed")
        if uniformly_resized:
            image = fn.resize(image, size=(244, 244))
        extra = {} if not use_shape else {"shape": fn.peek_image_shape(encoded_image)}
        if fill_value is not None:
            extra["fill_value"] = fill_value
        if specify_translation_bounds:
            if use_shape:
                extra["max_translate_rel"] = 0.9
            else:
                extra["max_translate_abs"] = 400
        image = auto_augment.auto_augment(image, policy_name, **extra)
        return image

    # run the pipeline twice to make sure instantiation preserves determinism
    p1 = pipeline()
    p1.build()
    p2 = pipeline()
    p2.build()
    for _ in range(3):
        out1, = p1.run()
        out2, = p2.run()
        check_batch(out1, out2)
class VideoTest(unittest.TestCase):
    """Checks that `auto_augment` applied to video sequences is deterministic."""

    @classmethod
    def setUpClass(cls):
        # Decode two differently-sized clips once and cache the frames as numpy
        # arrays so the tests can feed them back through external_source.
        num_frames = 31
        roi_start = (90, 0)
        roi_end = (630, 1280)
        size_1 = (215, 128)
        size_2 = (215, 220)

        @pipeline_def(batch_size=6, device_id=0, num_threads=4, seed=42)
        def pipeline(size):
            video = fn.readers.video_resize(
                filenames=vid_filenames,
                sequence_length=num_frames,
                roi_start=roi_start,
                roi_end=roi_end,
                resize_x=size[1],
                resize_y=size[0],
                file_list_include_preceding_frame=True,
                device='gpu',
            )
            return video

        cls.vid_files = []
        for size in (size_1, size_2):
            p = pipeline(size=size)
            p.build()
            out, = p.run()
            cls.vid_files.extend(np.array(sample) for sample in out.as_cpu())

    @params(*tuple(
        enumerate((
            ("cpu", "image_net", 1, True),
            ("cpu", "reduced_cifar10", 4, False),
            ("cpu", "svhn", 17, True),
            ("cpu", "reduced_image_net", 3, False),
            ("gpu", "image_net", 21, False),
            ("gpu", "reduced_cifar10", 3, True),
            ("gpu", "svhn", 1, False),
            ("gpu", "reduced_image_net", 5, False),
        ))))
    def test_uniform(self, i, args):
        # Two identically-seeded pipeline instances running the given predefined
        # policy must produce identical augmented video batches.
        device, policy_name, batch_size, use_shape = args
        num_iterations = 3
        assert device in ("gpu", "cpu")

        @pipeline_def(batch_size=batch_size, device_id=0, num_threads=4, seed=205,
                      enable_conditionals=True)
        def pipeline():
            rng = random.Random(42 + i)
            video = fn.external_source(
                source=lambda: list(rng.choices(self.vid_files, k=batch_size)), batch=True,
                layout="FHWC")
            extra = {} if not use_shape else {"shape": fn.shapes(video)[1:]}
            if device == "gpu":
                video = video.gpu()
            video = auto_augment.auto_augment(video, policy_name, **extra)
            return video

        # run the pipeline twice to make sure instantiation preserves determinism
        p1 = pipeline()
        p1.build()
        p2 = pipeline()
        p2.build()
        for _ in range(num_iterations):
            out1, = p1.run()
            out2, = p2.run()
            check_batch(out1, out2)
@params(
    (False, "cpu", 256),
    (False, "gpu", 512),
    (True, "cpu", 400),
    (True, "gpu", 348),
)
def test_sub_policy(randomly_negate, dev, batch_size):
    # Verifies that each sub-policy applies its augmentations in declaration
    # order and, when `randomly_negate` is set, that magnitude signs are
    # distributed uniformly over the possible sign sequences (chi-square test).
    num_magnitude_bins = 10
    @augmentation(
        mag_range=(0, 9),
        mag_to_param=mag_to_param_with_op_id(1),
        param_device=dev,
    )
    def first(data, op_id_mag_id):
        return fn.cat(data, op_id_mag_id)
    @augmentation(
        mag_range=(10, 19),
        mag_to_param=mag_to_param_with_op_id(2),
        randomly_negate=randomly_negate,
        param_device=dev,
    )
    def second(data, op_id_mag_id):
        return fn.cat(data, op_id_mag_id)
    @augmentation(
        mag_range=(20, 29),
        mag_to_param=mag_to_param_with_op_id(3),
        randomly_negate=randomly_negate,
        param_device=dev,
    )
    def third(data, op_id_mag_id):
        return fn.cat(data, op_id_mag_id)
    sub_policies = [
        [(first, 1, 0), (second, 1, 5), (third, 1, 3)],
        [(first, 1, 1), (third, 1, 4), (first, 1, 2)],
        [(second, 1, 2), (first, 1, 3), (third, 1, 4)],
        [(second, 1, 3), (third, 1, 2), (first, 1, 5)],
        [(third, 1, 4), (first, 1, 1), (second, 1, 1)],
        [(third, 1, 5), (second, 1, 9), (first, 1, 2)],
        [(first, 1, 6), (first, 1, 1)],
        [(third, 1, 7)],
        [(first, 1, 8), (first, 1, 4), (second, 1, 7), (second, 1, 6)],
    ]
    policy = Policy("MyPolicy", num_magnitude_bins=num_magnitude_bins, sub_policies=sub_policies)
    p = concat_aug_pipeline(batch_size=batch_size, dev=dev, policy=policy)
    p.build()
    sub_policy_outputs = collect_sub_policy_outputs(sub_policies, num_magnitude_bins)
    # magnitudes are chosen so that the magnitude of the first op in
    # each sub-policy identifies the sub-policy
    assert len({out[0][1] for out in sub_policy_outputs}) == len(sub_policy_outputs)
    output_cases = {out[0][1]: np.array(out) for out in sub_policy_outputs}
    # for every sub-policy, enumerate all possible magnitude-sign sequences:
    # (True, False) for ops that may negate, (False,) for the ones that cannot
    sub_policy_negation_cases = []
    for sub_policy in sub_policies:
        negated = []
        for aug, _, _ in sub_policy:
            if aug.randomly_negate:
                negated.append((True, False))
            else:
                negated.append((False, ))
        sub_policy_negation_cases.append(list(itertools.product(*negated)))
    assert len(sub_policy_outputs) == len(sub_policy_negation_cases)
    for _ in range(5):
        output, = p.run()
        if dev == "gpu":
            output = output.as_cpu()
        output = [np.array(sample) for sample in output]
        for sample in output:
            # ignoring signs, the sample must match exactly one sub-policy output
            test_sample = sample if not randomly_negate else np.abs(sample)
            np.testing.assert_equal(np.abs(test_sample), output_cases[test_sample[0][1]])
            for op_mag in sample:
                if op_mag[1] < 0:
                    # the `second` and `third` augmentation are marked as randomly_negated
                    assert op_mag[0] in [2, 3], f"{sample}"
        if randomly_negate:
            # for each sub-policy, count occurrences of any possible sequence
            # of magnitude signs
            negation_cases = {
                out[0][1]: {case: 0
                            for case in cases}
                for out, cases in zip(sub_policy_outputs, sub_policy_negation_cases)
            }
            for sample in output:
                mag_signs = tuple(op_mag[1] < 0 for op_mag in sample)
                negation_cases[np.abs(sample[0][1])][mag_signs] += 1
            counts, expected_counts = [], []
            for sub_policy_cases in negation_cases.values():
                # signs should be uniform across sub-policies and sign cases
                expected = batch_size / (len(sub_policies) * len(sub_policy_cases))
                for count in sub_policy_cases.values():
                    counts.append(count)
                    expected_counts.append(expected)
            stat = chisquare(counts, expected_counts)
            # assert that the magnitudes negation looks independently enough
            # (0.05 <=), but also that it is not too ideal (i.e. like all
            # cases happening exactly the expected number of times)
            assert 0.05 <= stat.pvalue <= 0.95, f"{stat}"
@params(("cpu", ), ("gpu", ))
def test_op_skipping(dev):
num_magnitude_bins = 20
batch_size = 2400
@augmentation(
mag_range=(0, 19),
mag_to_param=mag_to_param_with_op_id(1),
randomly_negate=True,
param_device=dev,
)
def first(data, op_id_mag_id):
return fn.cat(data, op_id_mag_id)
@augmentation(
mag_range=(0, 19),
mag_to_param=mag_to_param_with_op_id(2),
randomly_negate=True,
param_device=dev,
)
def second(data, op_id_mag_id):
return fn.cat(data, op_id_mag_id)
@augmentation(
mag_range=(0, 19),
mag_to_param=mag_to_param_with_op_id(3),
param_device=dev,
)
def third(data, op_id_mag_id):
return fn.cat(data, op_id_mag_id)
@augmentation(
mag_range=(0, 19),
mag_to_param=mag_to_param_with_op_id(4),
param_device=dev,
)
def first_stage_only(data, op_id_mag_id):
return fn.cat(data, op_id_mag_id)
@augmentation(
mag_range=(0, 19),
mag_to_param=mag_to_param_with_op_id(5),
param_device=dev,
)
def second_stage_only(data, op_id_mag_id):
return fn.cat(data, op_id_mag_id)
sub_policies = [
[(first, 0.5, 1), (first, 0.25, 2)],
[(second, 0.8, 3), (second, 0.7, 4)],
[(first, 0.9, 5), (second, 0.6, 6)],
[(second, 0.3, 7), (first, 0.25, 8)],
[(third, 1, 9), (third, 0.75, 10)],
[(third, 0.3, 11), (first, 0.22, 12)],
[(second, 0.6, 13), (third, 0, 14)],
[(first_stage_only, 0.5, 15), (third, 0.7, 16)],
[(third, 0.8, 17), (second_stage_only, 0.6, 18)],
]
# sub_policy_cases = [[] for _ in range(len(sub_policies))]
expected_counts = {tuple(): 0.}
for (left_aug, left_p, left_mag), (right_aug, right_p, right_mag) in sub_policies:
expected_counts[tuple()] += (1. - left_p) * (1 - right_p) / len(sub_policies)
only_left_p = left_p * (1 - right_p) / len(sub_policies)
only_right_p = (1 - left_p) * right_p / len(sub_policies)
for aug, mag, prob in [(left_aug, left_mag, only_left_p),
(right_aug, right_mag, only_right_p)]:
if not aug.randomly_negate:
expected_counts[(mag, )] = prob
else:
expected_counts[(mag, )] = prob / 2
expected_counts[(-mag, )] = prob / 2
sign_cases = [(-1, 1) if aug.randomly_negate else (1, ) for aug in (left_aug, right_aug)]
sign_cases = list(itertools.product(*sign_cases))
prob = left_p * right_p / len(sub_policies)
for left_sign, right_sign in sign_cases:
mags = (left_sign * left_mag, right_sign * right_mag)
expected_counts[mags] = prob / len(sign_cases)
expected_counts = {mag: prob * batch_size for mag, prob in expected_counts.items() if prob > 0}
assert all(num_elements >= 5 for num_elements in expected_counts.values()), \
f"The batch size is too small (i.e. some output cases are expected less " \
f"than five times in the output): {expected_counts}"
policy = Policy("MyPolicy", num_magnitude_bins=num_magnitude_bins, sub_policies=sub_policies)
p = concat_aug_pipeline(batch_size=batch_size, dev=dev, policy=policy)
p.build()
for _ in range(5):
output, = p.run()
if dev == "gpu":
output = output.as_cpu()
output = [np.array(sample) for sample in output]
actual_counts = {allowed_case: 0 for allowed_case in expected_counts}
for sample in output:
mags = tuple(int(op_mag[1]) for op_mag in sample)
actual_counts[mags] += 1
actual, expected = [], []
for mags in expected_counts:
actual.append(actual_counts[mags])
expected.append(expected_counts[mags])
stat = chisquare(actual, expected)
# assert that the magnitudes negation looks independently enough
# (0.05 <=), but also that it is not too ideal (i.e. like all
# cases happening exactly the expected number of times)
assert 0.05 <= stat.pvalue <= 0.95, f"{stat}"
def test_policy_presentation():
    """Check Policy string presentation and augmentation-name de-clashing.

    Augmentations sharing a Python name must be distinguished in the policy
    with a ``NNN__name`` prefix, while identical augmentation objects are
    deduplicated (the same instance is reused across sub-policies).
    """
    empty_policy = Policy("EmptyPolicy", num_magnitude_bins=31, sub_policies=[])
    empty_policy_str = str(empty_policy)
    assert "sub_policies=[]" in empty_policy_str, empty_policy_str
    assert "augmentations={}" in empty_policy_str, empty_policy_str

    def get_first_augment():
        @augmentation(mag_range=(100, 200))
        def clashing_name(data, _):
            return data
        return clashing_name

    def get_second_augment():
        @augmentation
        def clashing_name(data, _):
            return data
        return clashing_name

    one = get_first_augment()
    another = get_second_augment()
    sub_policies = [[(one, 0.1, 5), (another, 0.4, None)], [(another, 0.2, None), (one, 0.5, 2)],
                    [(another, 0.7, None)]]
    policy = Policy(name="DummyPolicy", num_magnitude_bins=11, sub_policies=sub_policies)
    # the same augmentation object must be reused wherever it appears
    assert policy.sub_policies[0][0][0] is policy.sub_policies[1][1][0]
    assert policy.sub_policies[0][1][0] is policy.sub_policies[1][0][0]
    assert policy.sub_policies[0][1][0] is policy.sub_policies[2][0][0]
    # probabilities and magnitudes must be preserved verbatim
    assert len(sub_policies) == len(policy.sub_policies)
    for sub_pol, pol_sub_pol in zip(sub_policies, policy.sub_policies):
        assert len(sub_pol) == len(pol_sub_pol)
        for (aug, p, mag), (pol_aug, pol_p, pol_mag) in zip(sub_pol, pol_sub_pol):
            assert p == pol_p, f"({aug}, {p}, {mag}), ({pol_aug}, {pol_p}, {pol_mag})"
            assert mag == pol_mag, f"({aug}, {p}, {mag}), ({pol_aug}, {pol_p}, {pol_mag})"

    @augmentation
    def yet_another_aug(data, _):
        return data

    sub_policies = [[(yet_another_aug, 0.5, None), (one.augmentation(mag_range=(0, i)), 0.24, i)]
                    for i in range(1, 107)]
    bigger_policy = Policy(name="BiggerPolicy", num_magnitude_bins=200, sub_policies=sub_policies)
    for i, (first, second) in enumerate(bigger_policy.sub_policies):
        # bugfix: the first assertion's failure message used to print
        # `second[0].name` instead of the value that was actually checked
        assert first[0].name == '000__yet_another_aug', f"{first[0].name}"
        assert second[0].name == f'{(i + 1):03}__clashing_name', f"{second[0].name}"
def test_unused_arg_fail():
    """Passing a kwarg that no augmentation consumes must raise."""
    expected_msg = "The kwarg `misspelled_kwarg` is not used by any of the augmentations."

    @pipeline_def(enable_conditionals=True, batch_size=5, num_threads=4, device_id=0, seed=43)
    def broken_pipeline():
        jpeg, _ = fn.readers.file(name="Reader", file_root=images_dir)
        decoded = fn.decoders.image(jpeg, device="mixed")
        policy = auto_augment.get_image_net_policy()
        return auto_augment.apply_auto_augment(policy, decoded, misspelled_kwarg=100)

    with assert_raises(Exception, glob=expected_msg):
        broken_pipeline()
def test_empty_policy_fail():
    """An empty policy passed to apply_auto_augment must raise."""
    expected_msg = ("Cannot run empty policy. Got Policy(name='ShouldFail', num_magnitude_bins=9, "
                    "sub_policies=[], augmentations={}) in `apply_auto_augment` call.")

    @pipeline_def(enable_conditionals=True, batch_size=5, num_threads=4, device_id=0, seed=43)
    def empty_policy_pipeline():
        jpeg, _ = fn.readers.file(name="Reader", file_root=images_dir)
        decoded = fn.decoders.image(jpeg, device="mixed")
        return auto_augment.apply_auto_augment(Policy("ShouldFail", 9, []), decoded)

    with assert_raises(Exception, glob=expected_msg):
        empty_policy_pipeline()
def test_missing_shape_fail():
    """A policy that needs the image shape must raise when none is provided."""
    expected_msg = "`translate_y` * provide it as `shape` argument to `apply_auto_augment` call"

    @pipeline_def(enable_conditionals=True, batch_size=5, num_threads=4, device_id=0, seed=43)
    def no_shape_pipeline():
        jpeg, _ = fn.readers.file(name="Reader", file_root=images_dir)
        decoded = fn.decoders.image(jpeg, device="mixed")
        shape_policy = auto_augment.get_image_net_policy(use_shape=True)
        return auto_augment.apply_auto_augment(shape_policy, decoded)

    with assert_raises(Exception, glob=expected_msg):
        no_shape_pipeline()
def test_sub_policy_coalescing_matrix_correctness():
    # Verifies _sub_policy_to_augmentation_matrix_map: entry [i, stage] must
    # select the i-th sub-policy's augmentation for that stage, and the
    # identity augmentation for stages past the sub-policy's length.
    @augmentation(
        mag_range=(0, 1),
        randomly_negate=True,
    )
    def first(data, op_id_mag_id):
        return op_id_mag_id
    @augmentation(
        mag_range=(1, 2),
        randomly_negate=True,
    )
    def second(data, op_id_mag_id):
        return op_id_mag_id
    @augmentation(
        mag_range=(2, 3), )
    def third(data, op_id_mag_id):
        return op_id_mag_id
    def custom_policy():
        # sub-policies of varying length to exercise the identity padding
        return Policy("MyCustomPolicy", 14, [
            [(third, 0.5, 1), (second, 1., 2), (first, 0.2, 3), (second, 0.25, 4)],
            [(first, 0.6, 5)],
            [(second, 0.1, 6), (third, 0.7, 7)],
            [(second, 0.8, 8), (first, 0.9, 9), (second, 1., 10), (third, 0.2, 11),
             (first, 0.5, 12), (first, 1., 13)],
        ])
    predefined_policies = [
        auto_augment.get_image_net_policy, auto_augment.get_reduced_image_net_policy,
        auto_augment.get_svhn_policy, auto_augment.get_reduced_cifar10_policy, custom_policy
    ]
    for policy_getter in predefined_policies:
        policy = policy_getter()
        matrix, augments = auto_augment._sub_policy_to_augmentation_matrix_map(policy)
        max_sub_policy_len = max(len(sub_policy) for sub_policy in policy.sub_policies)
        expected_shape = (len(policy.sub_policies), max_sub_policy_len)
        assert matrix.shape == expected_shape, f"{matrix.shape} {expected_shape} {policy}"
        for i, sub_policy in enumerate(policy.sub_policies):
            for stage_idx in range(max_sub_policy_len):
                mat_aug = augments[stage_idx][matrix[i, stage_idx]]
                if stage_idx < len(sub_policy):
                    sub_pol_aug, _, _ = sub_policy[stage_idx]
                    assert mat_aug is sub_pol_aug, \
                        f"{i} {stage_idx} {mat_aug} {sub_pol_aug} {policy}"
                else:
                    # shorter sub-policies are padded with the identity augmentation
                    assert mat_aug is a.identity, f"{i} {stage_idx} {mat_aug} {policy}"
def test_wrong_sub_policy_format_fail():
    # Every malformed Policy specification must raise (or warn) with a
    # descriptive message; the `glob` patterns use `[[]`/`[]]` to escape
    # literal brackets and `*` as a wildcard.
    with assert_raises(Exception,
                       glob="The `num_magnitude_bins` must be a positive integer, got 0"):
        Policy("ShouldFail", 0.25, a.rotate)
    with assert_raises(Exception,
                       glob="The `sub_policies` must be a list or tuple of sub policies"):
        Policy("ShouldFail", 9, a.rotate)
    with assert_raises(Exception, glob="Each sub policy must be a list or tuple"):
        Policy("ShouldFail", 9, [a.rotate])
    with assert_raises(
            Exception,
            glob="as a triple: (augmentation, probability, magnitude). Got Augmentation"):
        Policy("ShouldFail", 9, [(a.rotate, a.shear_x)])
    with assert_raises(Exception, glob="must be an instance of Augmentation. Got `0.5`"):
        Policy("ShouldFail", 9, [[(0.5, a.rotate, 3)]])
    with assert_raises(Exception,
                       glob="Probability * must be a number from `[[]0, 1[]]` range. Got `2`"):
        Policy("ShouldFail", 9, [[(a.rotate, 2, 2)]])
    with assert_raises(Exception, glob="Magnitude ** `[[]0, 8[]]` range. Got `-1`"):
        Policy("ShouldFail", 9, [[(a.rotate, 1, -1)]])
    @augmentation(mag_range=(0, 250))
    def parametrized_aug(data, magnitude):
        return data
    @augmentation
    def non_parametrized_aug(data, _):
        return data
    with assert_raises(Exception, glob="the magnitude bin is required"):
        Policy("ShouldFail", 7, [[(parametrized_aug, 0.5, None)]])
    # probability 0 (int or float) only warns -- the policy is still built
    with assert_warns(glob="probability 0 in one of the sub-policies"):
        Policy("ShouldFail", 7, [[(parametrized_aug, 0, 5)]])
    with assert_warns(glob="probability 0 in one of the sub-policies"):
        Policy("ShouldFail", 7, [[(parametrized_aug, 0., 5)]])
    with assert_warns(glob="The augmentation does not accept magnitudes"):
        Policy("ShouldFail", 7, [[(non_parametrized_aug, 1., 5)]])
|
DALI-main
|
dali/test/python/auto_aug/test_auto_augment.py
|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import random
import unittest
import numpy as np
from scipy.stats import chisquare
from nose2.tools import params
from nvidia.dali import fn, types
from nvidia.dali import pipeline_def
from nvidia.dali.auto_aug import trivial_augment
from nvidia.dali.auto_aug.core import augmentation
from test_utils import get_dali_extra_path, check_batch
# Test data locations inside the DALI_extra repository.
data_root = get_dali_extra_path()
images_dir = os.path.join(data_root, 'db', 'single', 'jpeg')
vid_dir = os.path.join(data_root, "db", "video", "sintel", "video_files")
vid_files = ["sintel_trailer-720p_2.mp4"]
vid_filenames = [os.path.join(vid_dir, vid_file) for vid_file in vid_files]
@params(*tuple(
    enumerate(
        itertools.product(("cpu", "gpu"), (True, False), (True, False), (None, 0), (True, False)))))
def test_run_trivial(i, args):
    # Smoke test: trivial_augment_wide must run for every combination of
    # device, resizing, shape passing, fill_value and translation bounds,
    # and must be deterministic across pipeline instantiations.
    dev, uniformly_resized, use_shape, fill_value, specify_translation_bounds = args
    # batch size and number of magnitude bins vary with the case index
    batch_sizes = [1, 8, 7, 64, 13, 64, 41]
    num_magnitude_bin_cases = [1, 11, 31, 40]
    batch_size = batch_sizes[i % len(batch_sizes)]
    num_magnitude_bins = num_magnitude_bin_cases[i % len(num_magnitude_bin_cases)]
    @pipeline_def(enable_conditionals=True, batch_size=batch_size, num_threads=4, device_id=0,
                  seed=43)
    def pipeline():
        encoded_image, _ = fn.readers.file(name="Reader", file_root=images_dir)
        image = fn.decoders.image(encoded_image, device="cpu" if dev == "cpu" else "mixed")
        if uniformly_resized:
            image = fn.resize(image, size=(244, 244))
        extra = {} if not use_shape else {"shape": fn.peek_image_shape(encoded_image)}
        if fill_value is not None:
            extra["fill_value"] = fill_value
        if specify_translation_bounds:
            # relative bound requires the shape, absolute does not
            if use_shape:
                extra["max_translate_rel"] = 0.9
            else:
                extra["max_translate_abs"] = 400
        image = trivial_augment.trivial_augment_wide(image, num_magnitude_bins=num_magnitude_bins,
                                                     **extra)
        return image
    # run the pipeline twice to make sure instantiation preserves determinism
    p1 = pipeline()
    p1.build()
    p2 = pipeline()
    p2.build()
    for _ in range(3):
        out1, = p1.run()
        out2, = p2.run()
        check_batch(out1, out2)
class VideoTest(unittest.TestCase):
    """Runs trivial_augment on video sequences produced by the video reader."""
    @classmethod
    def setUpClass(cls):
        # Decode two batches of differently-sized sequences once; the tests
        # sample from `cls.vid_files` instead of re-reading the videos.
        num_frames = 31
        roi_start = (90, 0)
        roi_end = (630, 1280)
        size_1 = (223, 367)
        size_2 = (215, 220)
        @pipeline_def(batch_size=6, device_id=0, num_threads=4, seed=42)
        def pipeline(size):
            video = fn.readers.video_resize(
                filenames=vid_filenames,
                sequence_length=num_frames,
                roi_start=roi_start,
                roi_end=roi_end,
                resize_x=size[1],
                resize_y=size[0],
                file_list_include_preceding_frame=True,
                device='gpu',
            )
            return video
        cls.vid_files = []
        for size in (size_1, size_2):
            p = pipeline(size=size)
            p.build()
            out, = p.run()
            cls.vid_files.extend(np.array(sample) for sample in out.as_cpu())
    @params(*tuple(
        enumerate((
            ("cpu", 6, False, 1),
            ("cpu", 1, True, 10),
            ("gpu", 12, True, None),
            ("gpu", 4, False, 101),
        ))))
    def test_uniform(self, i, args):
        # The same pipeline instantiated twice must give identical outputs.
        device, batch_size, use_shape, num_magnitude_bins = args
        num_iterations = 3
        assert device in ("gpu", "cpu")
        @pipeline_def(batch_size=batch_size, device_id=0, num_threads=4, seed=205,
                      enable_conditionals=True)
        def pipeline():
            # per-case RNG so every parametrization samples its own sequence mix
            rng = random.Random(42 + i)
            video = fn.external_source(
                source=lambda: list(rng.choices(self.vid_files, k=batch_size)), batch=True,
                layout="FHWC")
            # optionally pass per-sample frame shape (drop the leading F extent)
            extra = {} if not use_shape else {"shape": fn.shapes(video)[1:]}
            if num_magnitude_bins is not None:
                extra["num_magnitude_bins"] = num_magnitude_bins
            if device == "gpu":
                video = video.gpu()
            video = trivial_augment.trivial_augment_wide(video, **extra)
            return video
        # run the pipeline twice to make sure instantiation preserves determinism
        p1 = pipeline()
        p1.build()
        p2 = pipeline()
        p2.build()
        for _ in range(num_iterations):
            out1, = p1.run()
            out2, = p2.run()
            check_batch(out1, out2)
@params(*tuple(itertools.product(
    ['cpu', 'gpu'],
    [True, False],
    [1, 3, 7],
    [2, 3, 7],
)))
def test_ops_mags_selection(dev, use_sign, num_magnitude_bins, num_ops):
    # Checks that trivial augment picks (op, magnitude, sign) uniformly:
    # each distinct output case should occur about batch_size * probability
    # times, validated with a chi-square test averaged over iterations.
    # the chisquare expects at least 5 elements in a bin and we can have around
    # num_magnitude_bins * num_ops * (2**use_signs)
    batch_size = 2048
    def mag_to_param_with_op_id(op_id):
        # encode (op_id, magnitude) so the output identifies what was applied
        def mag_to_param(magnitude):
            return np.array([op_id, magnitude], dtype=np.int32)
        return mag_to_param
    @augmentation(param_device=dev)
    def op(data, op_id_mag_id):
        return fn.cat(data, op_id_mag_id)
    # each op gets a disjoint magnitude range; every third op (optionally)
    # uses random sign negation
    augmentations = [
        op.augmentation(mag_range=(10 * i + 1, 10 * i + num_magnitude_bins),
                        mag_to_param=mag_to_param_with_op_id(i + 1), randomly_negate=use_sign
                        and i % 3 == 0) for i in range(num_ops)
    ]
    expected_counts = {}
    prob = 1. / (num_ops * num_magnitude_bins)
    for aug in augmentations:
        magnitudes = aug._get_magnitudes(num_magnitude_bins)
        assert len(magnitudes) == num_magnitude_bins
        for mag in magnitudes:
            if not aug.randomly_negate:
                expected_counts[tuple(aug.mag_to_param(mag))] = prob
            else:
                # negation splits the probability evenly between both signs
                expected_counts[tuple(aug.mag_to_param(mag))] = prob / 2
                expected_counts[tuple(aug.mag_to_param(-mag))] = prob / 2
    expected_counts = {output: p * batch_size for output, p in expected_counts.items()}
    @pipeline_def(enable_conditionals=True, batch_size=batch_size, num_threads=4, device_id=0,
                  seed=42)
    def pipeline():
        # start from an empty constant so the output is just the op's encoding
        data = types.Constant([], dtype=types.INT32)
        if dev == "gpu":
            data = data.gpu()
        data = trivial_augment.apply_trivial_augment(augmentations, data,
                                                     num_magnitude_bins=num_magnitude_bins)
        return data
    p = pipeline()
    p.build()
    stats = []
    for i in range(3):
        output, = p.run()
        output = [np.array(s) for s in (output.as_cpu() if dev == "gpu" else output)]
        actual_count = {allowed_out: 0 for allowed_out in expected_counts}
        for sample in output:
            actual_count[tuple(sample)] += 1
        actual = []
        expected = []
        for out in expected_counts:
            actual.append(actual_count[out])
            expected.append(expected_counts[out])
        stat = chisquare(actual, expected)
        stats.append(stat)
    # the mean p-value must be neither too small (non-uniform) nor too ideal
    mean_p_val = sum(stat.pvalue for stat in stats) / len(stats)
    assert 0.05 <= mean_p_val <= 0.95, f"{mean_p_val} {stat} {actual} {expected}"
|
DALI-main
|
dali/test/python/auto_aug/test_trivial_augment.py
|
from nvidia.dali.plugin.triton import autoserialize
from nvidia.dali import pipeline_def
@autoserialize
@pipeline_def(batch_size=1, num_threads=1, device_id=0)
def func_under_test():
    # import inside the pipeline body -- presumably exercises autoserialize
    # with a pipeline that references an extra module (see the file name);
    # the import is unused on purpose (noqa: F401)
    import numpy as np  # noqa: F401
    return 42
|
DALI-main
|
dali/test/python/autoserialize_test/custom_module_inside.py
|
from nvidia.dali.plugin.triton import autoserialize
from nvidia.dali import pipeline_def
@autoserialize
@pipeline_def(max_batch_size=1, num_threads=1, device_id=0)
def func_under_test():
    # minimal serializable pipeline returning a constant
    return 42
@autoserialize
@pipeline_def(max_batch_size=1, num_threads=1, device_id=0)
def func_that_shouldnt_be_here():
    # second @autoserialize pipeline in one module -- presumably the negative
    # case for "only one decorated pipeline allowed" (see the file name)
    return 42
|
DALI-main
|
dali/test/python/autoserialize_test/double_decorated_functions.py
|
from . import decorated_function # noqa: F401
|
DALI-main
|
dali/test/python/autoserialize_test/imports_decorated_function.py
|
from nvidia.dali.plugin.triton import autoserialize
from nvidia.dali import pipeline_def
@autoserialize
@pipeline_def(batch_size=1, num_threads=1, device_id=0)
def func_under_test():
    # minimal serializable pipeline returning a constant
    return 42
|
DALI-main
|
dali/test/python/autoserialize_test/decorated_function.py
|
from nvidia.dali.plugin.triton import autoserialize
@autoserialize
def func_under_test():
    # @autoserialize without @pipeline_def -- presumably the improper-usage
    # case the file name refers to
    return 42
|
DALI-main
|
dali/test/python/autoserialize_test/improper_decorated_function.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import numpy as np
import os.path
from test_utils import check_batch
import PIL.Image
try:
from PIL.Image.Resampling import NEAREST, BILINEAR, BICUBIC, LANCZOS
except Exception:
# Deprecated import, needed for Python 3.6
from PIL.Image import NEAREST, BILINEAR, BICUBIC, LANCZOS
def init_video_data():
    """Decode one batch of video sequences and return the first sample.

    Reads the Sintel test videos with the GPU video reader (32 frames,
    stride 5) and copies the first sequence back to the host as a numpy
    array of frames.
    """
    batch_size = 2
    video_directory = os.path.join(os.environ['DALI_EXTRA_PATH'],
                                   "db", "video", "sintel", "video_files")
    video_files = [os.path.join(video_directory, f) for f in sorted(os.listdir(video_directory))]
    video_pipe = dali.pipeline.Pipeline(batch_size, 3, 0, seed=16)
    with video_pipe:
        input = fn.readers.video(device="gpu", filenames=video_files, sequence_length=32, stride=5)
        video_pipe.set_outputs(input)
    video_pipe.build()
    out = video_pipe.run()
    in_seq = out[0].as_cpu().at(0)
    return in_seq
# Frames decoded once at module import; channel-last and channel-first views.
frames_fhwc = init_video_data()
frames_fchw = frames_fhwc.transpose([0, 3, 1, 2])
def GetSequences(channel_first, length, batch_size):
    """Return a callable producing batches of overlapping frame sequences.

    The returned callable takes the iteration number and yields a batch of
    `batch_size` sequences; each sequence consists of `length` consecutive
    frames starting at an offset derived from the iteration number, wrapping
    around the source clip (frames_fchw or frames_fhwc, per `channel_first`).
    """
    source = frames_fchw if channel_first else frames_fhwc
    num_frames = source.shape[0]

    def get_seq(start):
        # renamed from `id` (shadowed the builtin); collect `length`
        # consecutive frames, wrapping around the clip
        return np.array([source[(start + k) % num_frames] for k in range(length)])

    def get_batch(iteration):
        # renamed from `iter` (shadowed the builtin)
        return [get_seq(iteration * batch_size + i) for i in range(batch_size)]

    return get_batch
# Map DALI interpolation types to PIL resampling filters for the reference
# implementation (INTERP_TRIANGULAR is matched with PIL's BILINEAR).
resample_dali2pil = {
    types.INTERP_NN: NEAREST,
    types.INTERP_TRIANGULAR: BILINEAR,
    types.INTERP_CUBIC: BICUBIC,
    types.INTERP_LANCZOS3: LANCZOS
}
def resize_PIL(channel_first, interp, w, h):
    """Return a per-sample callable that resizes every frame of a sequence
    to (w, h) with PIL, using the PIL filter matching `interp`."""
    pil_resample = resample_dali2pil[interp]

    def _resize_frame(frame):
        # PIL operates on HWC images -- convert from/to CHW when channel_first
        if channel_first:
            frame = frame.transpose([1, 2, 0])
        resized = np.array(PIL.Image.fromarray(frame).resize([w, h], resample=pil_resample))
        return resized.transpose([2, 0, 1]) if channel_first else resized

    def resize(input):
        return np.array([_resize_frame(input[i]) for i in range(input.shape[0])])

    return resize
def create_ref_pipe(channel_first, seq_len, interp, dtype, w, h, batch_size=2):
    """Reference pipeline: resize every frame with PIL via python_function.

    Runs synchronously (exec_async/exec_pipelined disabled) as required by
    fn.python_function.
    """
    pipe = dali.pipeline.Pipeline(batch_size, 1, 0, 0, exec_async=False, exec_pipelined=False)
    with pipe:
        layout = "FCHW" if channel_first else "FHWC"
        ext = fn.external_source(GetSequences(channel_first, seq_len, batch_size), layout=layout)
        pil_resized = fn.python_function(ext, function=resize_PIL(channel_first, interp, w, h),
                                         batch_processing=False)
        if dtype is not None:  # unfortunately, PIL can't quite handle that
            pil_resized = fn.cast(pil_resized, dtype=dtype)
        # reattach the sequence layout to the python_function output
        pil_resized = fn.reshape(pil_resized, layout=layout)
        pipe.set_outputs(pil_resized)
    return pipe
def create_dali_pipe(channel_first, seq_len, interp, dtype, w, h, batch_size=2):
    """DALI pipeline under test: resize the sequence on CPU and GPU.

    Outputs: resized CPU batch, resized GPU batch, the input HW extents, and
    the original sizes saved by `save_attrs` for both backends.
    """
    pipe = dali.pipeline.Pipeline(batch_size, 1, 0, 0)
    with pipe:
        layout = "FCHW" if channel_first else "FHWC"
        ext = fn.external_source(GetSequences(channel_first, seq_len, batch_size), layout=layout)
        resize_cpu_out = fn.resize(ext, resize_x=w, resize_y=h, interp_type=interp,
                                   dtype=dtype, save_attrs=True)
        resize_gpu_out = fn.resize(ext.gpu(), resize_x=w, resize_y=h, interp_type=interp,
                                   minibatch_size=4, dtype=dtype, save_attrs=True)
        dali_resized_cpu, size_cpu = resize_cpu_out
        dali_resized_gpu, size_gpu = resize_gpu_out
        # extract just HW part from the input shape
        ext_size = fn.slice(fn.cast(fn.shapes(ext), dtype=types.INT32),
                            2 if channel_first else 1, 2, axes=[0])
        pipe.set_outputs(dali_resized_cpu, dali_resized_gpu, ext_size, size_cpu, size_gpu)
    return pipe
def _test_resize(layout, interp, dtype, w, h):
    """Compare DALI CPU/GPU sequence resize against the PIL reference.

    Frames are compared with a 1-pixel border cropped away (the [1:-1]
    slices) so border-handling differences don't fail the comparison; the
    `save_attrs` sizes must match the input HW extents exactly.
    """
    channel_first = (layout == "FCHW")
    pipe_dali = create_dali_pipe(channel_first, 8, interp, dtype, w, h)
    pipe_dali.build()
    pipe_ref = create_ref_pipe(channel_first, 8, interp, dtype, w, h)
    pipe_ref.build()
    eps = 1e-2
    max_err = 6
    for iter in range(4):
        out_dali = pipe_dali.run()
        out_ref = pipe_ref.run()[0]
        dali_cpu = out_dali[0]
        dali_gpu = out_dali[1]
        if interp == types.INTERP_LANCZOS3:
            # PIL can't resize float data. Lanczos resamling generates overshoot which we have
            # to get rid of for the comparison to succeed.
            dali_cpu = [np.array(x).clip(0, 255) for x in dali_cpu]
            dali_gpu = [np.array(x).clip(0, 255) for x in dali_gpu.as_cpu()]
        else:
            dali_cpu = [np.array(x) for x in dali_cpu]
            dali_gpu = [np.array(x) for x in dali_gpu.as_cpu()]
        # crop a 1-pixel border from all compared outputs (axis order depends
        # on the layout)
        if channel_first:
            out_ref = [np.array(x)[:, :, 1:-1, 1:-1] for x in out_ref]
            dali_gpu = [x[:, :, 1:-1, 1:-1] for x in dali_gpu]
            dali_cpu = [x[:, :, 1:-1, 1:-1] for x in dali_cpu]
        else:
            out_ref = [np.array(x)[:, 1:-1, 1:-1, :] for x in out_ref]
            dali_gpu = [x[:, 1:-1, 1:-1, :] for x in dali_gpu]
            dali_cpu = [x[:, 1:-1, 1:-1, :] for x in dali_cpu]
        check_batch(dali_cpu, out_ref, 2, eps=eps, max_allowed_error=max_err)
        check_batch(dali_gpu, out_ref, 2, eps=eps, max_allowed_error=max_err)
        # sizes stored by save_attrs must equal the input HW extents
        ext_size = out_dali[2]
        size_cpu = out_dali[3]
        size_gpu = out_dali[4]
        check_batch(ext_size, size_cpu, 2)
        check_batch(ext_size, size_gpu, 2)
def test_resize():
    # Nose-style test generator: yields one case per (interp, size, dtype)
    # combination, flipping the layout between cases.
    channel_first = False
    for interp, w, h in [(types.INTERP_NN, 640, 480),
                         (types.INTERP_TRIANGULAR, 100, 80),
                         (types.INTERP_LANCZOS3, 200, 100)]:
        for dtype in [None, types.UINT8, types.FLOAT]:
            layout = "FCHW" if channel_first else "FHWC"
            channel_first = not channel_first  # alternating pattern cuts number of cases by half
            yield _test_resize, layout, interp, dtype, w, h
|
DALI-main
|
dali/test/python/operator_2/test_resize_seq.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import functools
import math
import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import os.path
from nvidia.dali import Pipeline, pipeline_def
from nvidia.dali.data_node import DataNode as _DataNode
from test_utils import check_batch, get_dali_extra_path, as_array
import PIL.Image
try:
from PIL.Image.Resampling import NEAREST, BILINEAR, BICUBIC, LANCZOS
except Exception:
# Deprecated import, needed for Python 3.6
from PIL.Image import NEAREST, BILINEAR, BICUBIC, LANCZOS
# Map DALI interpolation types to PIL resampling filters for the reference
# implementation (INTERP_TRIANGULAR is matched with PIL's BILINEAR).
resample_dali2pil = {
    types.INTERP_NN: NEAREST,
    types.INTERP_TRIANGULAR: BILINEAR,
    types.INTERP_CUBIC: BICUBIC,
    types.INTERP_LANCZOS3: LANCZOS
}
# Test data locations inside the DALI_extra repository.
test_data_root = get_dali_extra_path()
db_2d_folder = os.path.join(test_data_root, 'db', 'lmdb')
db_3d_folder = os.path.join(test_data_root, *'db/3D/MRI/Knee/Jpegs/STU00001'.split('/'))
class random_3d_loader():
    """Infinite iterator yielding batches of 3D volumes.

    Each volume is assembled from the numbered grayscale JPEG slices of one
    MRI series directory under `db_3d_folder`; the series order is shuffled
    after every full pass.
    """
    def __init__(self, batch_size):
        np.random.seed(12345)
        self.subdirs = ['SER00004', 'SER00006', 'SER00008', 'SER00009', 'SER00011', 'SER00015']
        self.dirs = [os.path.join(db_3d_folder, x) for x in self.subdirs]
        self.batch_size = batch_size
        # NOTE(review): the seed is set twice (12345 above, 1234 here); only
        # the second call affects the shuffle below. Kept as-is so the
        # generated order stays reproducible.
        np.random.seed(1234)
        self.n = 0
        self.order = list(range(len(self.subdirs)))
        np.random.shuffle(self.order)
    def __iter__(self):
        return self
    def __next__(self):
        return [self.get_one() for _ in range(self.batch_size)]
    def get_one(self):
        """Load all slices of the next series and stack them into one volume."""
        idx = self.get_index()
        series_dir = self.dirs[idx]  # renamed from `dir` (shadowed the builtin)
        imgs = []
        i = 0
        path = os.path.join(series_dir, "%i.jpg")
        while True:
            fname = path % i
            img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
            if img is None:
                # slices are numbered contiguously; the first gap ends the series
                break
            i += 1
            # flip vertically and append a channel dimension
            imgs.append(img[::-1, :, np.newaxis])
        return np.stack(imgs, axis=0)
    def get_index(self):
        """Return the next series index, reshuffling after a full epoch."""
        if self.n >= len(self.order):
            np.random.shuffle(self.order)
            self.n = 0
        idx = self.order[self.n]
        self.n += 1
        return idx
def layout_str(dim, channel_first):
    """Build a layout string: a (D)HW core with C prepended or appended."""
    core = "DHW" if dim == 3 else "HW"
    if channel_first:
        return "C" + core
    return core + "C"
def resize2D_PIL(input, size, roi_start, roi_end, dtype, channel_first, resample):
    """Reference 2D resize of one image with PIL.

    `size`, `roi_start` and `roi_end` come in (H, W) order and are reversed
    into PIL's (W, H). For filters that overshoot (LANCZOS, BICUBIC) the
    dynamic range is compressed before resizing and re-expanded afterwards so
    the overshoot is not clipped by uint8 saturation.
    """
    if channel_first:
        input = input.transpose([1, 2, 0])
    size = list(reversed(size.astype(np.int32).tolist()))
    roi_start = reversed(roi_start.tolist())
    roi_end = reversed(roi_end.tolist())
    box = list(roi_start) + list(roi_end)
    has_overshoot = resample in (LANCZOS, BICUBIC)
    if has_overshoot:
        # compress dynamic range to allow for overshoot
        input = (64 + input * 0.5).round().astype(np.uint8)
    out = PIL.Image.fromarray(input).resize(size, box=box, resample=resample)
    out = np.array(out)
    if channel_first:
        out = out.transpose([2, 0, 1])
    if has_overshoot:
        # restore the dynamic range (loses one bit of precision)
        out = ((out.astype(np.float32) - 64) * 2.0)
        if dtype == np.uint8:
            out = out.round().clip(0, 255).astype(np.uint8)
        elif dtype == types.FLOAT:
            # NOTE(review): `dtype` here is a numpy dtype (see resize_PIL),
            # so this comparison against types.FLOAT likely never matches;
            # `out` is already float32 at this point -- verify
            out = out.astype(np.float32)
    return out
def resize3D_PIL(input, size, roi_start, roi_end, dtype, channel_first, resample):
    """Reference 3D resize of one volume with PIL, done in two 2D passes.

    PIL has no 3D resize, so the volume is resized slice-wise: first every
    XY slice (along Z), then every XZ slice (along the already-resized Y).
    Overshooting filters (LANCZOS, BICUBIC) get the same dynamic-range
    compression treatment as in resize2D_PIL.
    """
    size = list(size)
    if channel_first:
        input = input.transpose([1, 2, 3, 0])
    has_overshoot = resample in (LANCZOS, BICUBIC)
    if has_overshoot:
        # compress dynamic range to allow for overshoot
        input = (64 + input * 0.5).round().astype(np.uint8)
    mono = input.shape[3] == 1
    # First, slice along Z dimension and resize the XY slices
    sizeXY = [size[2], size[1]]
    boxXY = [roi_start[2], roi_start[1], roi_end[2], roi_end[1]]
    tmp = np.zeros([input.shape[0], size[1], size[2], input.shape[3]], dtype=np.uint8)
    for z in range(input.shape[0]):
        # single-channel slices must be 2D for PIL.Image.fromarray
        in_slice = input[z, :, :, 0] if mono else input[z]
        out_slice = np.array(
            PIL.Image.fromarray(in_slice).resize(sizeXY, box=boxXY, resample=resample))
        tmp[z] = out_slice[:, :, np.newaxis] if mono else out_slice
    # Then, slice along Y and resize XZ slices
    sizeXZ = [size[2], size[0]]
    # X is already at its final extent, so the ROI spans it fully
    boxXZ = [0, roi_start[0], size[2], roi_end[0]]
    out = np.zeros(size + [input.shape[3]], dtype=np.uint8)
    for y in range(size[1]):
        in_slice = tmp[:, y, :, 0] if mono else tmp[:, y]
        out_slice = np.array(
            PIL.Image.fromarray(in_slice).resize(sizeXZ, box=boxXZ, resample=resample))
        out[:, y, :, :] = out_slice[:, :, np.newaxis] if mono else out_slice
    # Restore dynamic range, losing some bit depth
    if has_overshoot:
        out = ((out.astype(np.float32) - 64) * 2.0)
        if dtype == np.uint8:
            out = out.round().clip(0, 255).astype(np.uint8)
        elif dtype == types.FLOAT:
            # NOTE(review): `dtype` here is a numpy dtype (see resize_PIL),
            # so this comparison against types.FLOAT likely never matches --
            # verify
            out = out.astype(np.float32)
    if channel_first:
        out = out.transpose([3, 0, 1, 2])
    return out
def resize_PIL(dim, channel_first, dtype, interp, data, size, roi_start, roi_end):
    """Wrap the 2D/3D PIL reference resize as a DALI python_function node."""
    assert dtype in (types.UINT8, types.FLOAT)
    np_dtype = np.uint8 if dtype == types.UINT8 else np.float32
    resize_func = resize3D_PIL if dim == 3 else resize2D_PIL
    bound = functools.partial(resize_func, channel_first=channel_first, dtype=np_dtype,
                              resample=resample_dali2pil[interp])
    return dali.fn.python_function(data, size, roi_start, roi_end, function=bound,
                                   batch_processing=False)
def resize_dali(input, channel_first, dtype, interp, mode, size, w, h, d, roi_start, roi_end,
                minibatch_size, max_size):
    """Thin wrapper over fn.resize used by the tests.

    Subpixel scale is disabled so that PIL can serve as a reference - PIL
    supports RoI but not the subpixel overscan DALI performs.
    """
    return fn.resize(input,
                     interp_type=interp,
                     dtype=dtype,
                     mode=mode,
                     resize_x=w,
                     resize_y=h,
                     resize_z=d,
                     size=size,
                     roi_start=roi_start,
                     roi_end=roi_end,
                     minibatch_size=minibatch_size,
                     max_size=max_size,
                     subpixel_scale=False)
# Note: PIL supports ROI, but, unlike DALI, does not support overscan. DALI routinely overscans
# on a subpixel level in about half of the cases, when adjusting ROI to keep subpixel aspect
# ratio. This precludes the use of PIL as reference for subpixel_scale.
def ref_output_size(mode, requested_size, roi_size, max_size=None):
    """Returns ideal (non-rounded) output size that would result from the parameters.

    The result is not rounded, so we can check the real, rounded, value against this one and find
    rounding errors by setting epsilon <1.

    Args:
        mode: "stretch", "not_smaller", "not_larger", "default" or None (same as "default").
        requested_size: scalar or per-dimension requested extents; 0/None entries mean
            "unspecified" for that dimension.
        roi_size: per-dimension input RoI extents; negative values (flipping) are handled
            by their absolute value.
        max_size: optional scalar or per-dimension upper bound on the output extents.

    Returns:
        List of (possibly fractional) output extents.

    Raises:
        ValueError: if ``mode`` is not one of the recognized modes.
    """
    roi_size = list(roi_size)
    dim = len(roi_size)
    # Broadcast scalar max_size / requested_size to one value per dimension.
    if max_size is None:
        max_size = [math.inf] * dim
    elif not isinstance(max_size, (list, tuple, np.ndarray)):
        max_size = [max_size] * dim
    elif isinstance(max_size, np.ndarray) and max_size.shape == ():
        # Bug fix: ndarray.shape is a tuple, so the original comparison to []
        # could never match; 0-d arrays then crashed in the comprehensions below.
        max_size = [float(max_size)] * dim
    if not isinstance(requested_size, (list, tuple, np.ndarray)):
        requested_size = [requested_size] * dim
    elif isinstance(requested_size, np.ndarray) and requested_size.shape == ():
        requested_size = [float(requested_size)] * dim
    # Zero extent means "unspecified"; use magnitudes for the rest.
    requested_size = [abs(x) if x else None for x in requested_size]
    roi_size = [abs(x) for x in roi_size]
    if not any(requested_size):
        # Nothing requested - output matches the (absolute) RoI size.
        return roi_size
    if mode == "stretch":
        # Unspecified extents default to the input extent; everything is clamped to max_size.
        return [min(m, o or i) for o, i, m in zip(requested_size, roi_size, max_size)]
    elif mode == "not_smaller":
        # Uniform scale that reaches or exceeds every specified extent...
        max_scale = 0
        for o, i in zip(requested_size, roi_size):
            if not o:
                continue
            max_scale = max(max_scale, abs(o / i))
        # ...but never exceeds max_size in any dimension.
        for i in range(len(roi_size)):
            max_scale = min(max_scale, max_size[i] / abs(roi_size[i]))
        return [x * max_scale for x in roi_size]
    elif mode == "not_larger":
        # Uniform scale such that no specified extent (nor max_size) is exceeded.
        min_scale = math.inf
        for o, i, m in zip(requested_size, roi_size, max_size):
            if not o:
                min_scale = min(min_scale, m / i)
                continue
            min_scale = min(min_scale, abs(min(m, o) / i))
        return [x * min_scale for x in roi_size]
    elif mode == "default" or mode is None:
        # Unspecified extents are scaled by the geometric mean of the scales of
        # the specified ones.
        avg_scale = 1
        power = 0
        for o, i in zip(requested_size, roi_size):
            if o:
                avg_scale *= abs(o / i)
                power += 1
        if power == len(requested_size):
            # All extents specified - just clamp them to max_size.
            return [min(o, m) for o, m in zip(requested_size, max_size)]
        if power > 1:
            avg_scale = math.pow(avg_scale, 1 / power)
        out = [min(m, o or avg_scale * i) for o, i, m in zip(requested_size, roi_size, max_size)]
        return out
    else:
        raise ValueError("Invalid mode '{}'".format(mode))
def test_ref_size():
    """Sanity-check ref_output_size against hand-computed expectations."""
    cases = [
        (("not_smaller", [600, 600], [640, 480], 720), [720, 540]),
        (("not_larger", [600, 500], [640, 480], 720), [600, 450]),
        (("stretch", [600, 500], [640, 480], [1000, 300]), [600, 300]),
        (("default", [600, 0], [640, 480], None), [600, 450]),
        (("default", [0, 600], [640, 480], None), [800, 600]),
        (("default", [80, 0, 20], [10, 10, 10], None), [80, 40, 20]),
    ]
    for args, expected in cases:
        assert ref_output_size(*args) == expected
def max_size(dim):
    """Max output extent used by the tests: 200 for 3D volumes, unlimited otherwise."""
    return None if dim != 3 else 200
def build_pipes(device, dim, batch_size, channel_first, mode, interp, dtype, w_input, h_input,
                d_input, use_size_arg, use_size_input, use_roi):
    """Construct the DALI resize pipeline under test and a PIL reference pipeline.

    The DALI pipeline decodes 2D images (or generates random 3D volumes), resizes
    them according to the requested parametrization (single ``size`` argument vs.
    separate w/h/d extents, constants vs. per-sample inputs, optional random RoI)
    and additionally outputs the inputs and all effective size/RoI tensors so the
    caller can feed identical parameters to the reference pipeline.

    The PIL pipeline is driven entirely through external sources named "images",
    "size", "roi_start" and "roi_end" and outputs the reference resized batch.

    Returns:
        (dali_pipe, pil_pipe) - both pipelines, already built.
    """
    dali_pipe = Pipeline(batch_size=batch_size, num_threads=8, device_id=0, seed=1234)
    with dali_pipe:
        if dim == 2:
            files, labels = dali.fn.readers.caffe(path=db_2d_folder, random_shuffle=True)
            images_cpu = dali.fn.decoders.image(files, device="cpu")
        else:
            images_cpu = dali.fn.external_source(source=random_3d_loader(batch_size), layout="DHWC")
        images_hwc = images_cpu if device == "cpu" else images_cpu.gpu()
        if channel_first:
            images = dali.fn.transpose(images_hwc, perm=[3, 0, 1, 2] if dim == 3 else [2, 0, 1],
                                       transpose_layout=True)
        else:
            images = images_hwc
        roi_start = None
        roi_end = None
        w = None
        h = None
        d = None
        size = None
        minibatch_size = 2 if dim == 3 else 8
        if use_roi:
            # Calculate absolute RoI
            in_size = fn.slice(fn.shapes(images_cpu),
                               types.Constant(0, dtype=types.FLOAT, device="cpu"),
                               types.Constant(dim, dtype=types.FLOAT, device="cpu"), axes=[0],
                               normalized_shape=False)
            # Random RoI covering at least the central 20% of each extent.
            roi_start = fn.random.uniform(range=(0, 0.4), shape=[dim]) * in_size
            roi_end = fn.random.uniform(range=(0.6, 1.0), shape=[dim]) * in_size
        size_range = (10, 200) if dim == 3 else (10, 1000)
        if use_size_arg:
            if use_size_input:
                # mask truncates to 0 or 1, leaving some extents unspecified (0).
                mask = fn.cast(fn.random.uniform(range=(0.8, 1.9), shape=[dim]), dtype=types.INT32)
                size = fn.random.uniform(range=size_range, shape=[dim]) * mask
            else:
                size = [300, 400] if dim == 2 else [80, 100, 120]
            resized = resize_dali(images, channel_first, dtype, interp, mode, size, None, None,
                                  None, roi_start, roi_end, minibatch_size=minibatch_size,
                                  max_size=max_size(dim))
        else:
            # Separate per-axis extents; each may randomly be left unspecified (0).
            if w_input:
                has_w = fn.random.coin_flip(probability=0.8)
                w = fn.random.uniform(range=size_range) * has_w
            else:
                w = 320  # some fixed value
            if h_input:
                has_h = fn.random.coin_flip(probability=0.8)
                h = fn.random.uniform(range=size_range) * has_h
            else:
                h = 240  # some other fixed value
            if dim >= 3:
                if d_input:
                    has_d = fn.random.coin_flip(probability=0.8)
                    d = fn.random.uniform(range=size_range) * has_d
                else:
                    d = 31  # some other fixed value
            resized = resize_dali(images, channel_first, dtype, interp, mode, None, w, h, d,
                                  roi_start, roi_end, minibatch_size=minibatch_size,
                                  max_size=max_size(dim))
        outputs = [images, resized]
        if roi_start is not None and roi_end is not None:
            outputs += [roi_start, roi_end]
        # Expose the requested extents so the test can reproduce them with PIL;
        # constants are wrapped so everything comes out as a tensor.
        for x in (d, h, w, size):
            if x is not None:
                if isinstance(x, _DataNode):
                    outputs.append(x)
                else:
                    outputs.append(types.Constant(np.array(x, dtype=np.float32)))
        dali_pipe.set_outputs(*outputs)
    # The PIL reference runs a python_function, which requires a non-async,
    # non-pipelined executor.
    pil_pipe = Pipeline(batch_size=batch_size, num_threads=8, device_id=0, exec_async=False,
                        exec_pipelined=False)
    with pil_pipe:
        images = fn.external_source(name="images", layout=layout_str(dim, channel_first))
        sizes = fn.external_source(name="size")
        roi_start = fn.external_source(name="roi_start")
        roi_end = fn.external_source(name="roi_end")
        resized = resize_PIL(dim, channel_first, dtype, interp, images, sizes, roi_start, roi_end)
        resized = fn.reshape(resized, layout=layout_str(dim, channel_first))
        pil_pipe.set_outputs(resized)
    dali_pipe.build()
    pil_pipe.build()
    return dali_pipe, pil_pipe
def interior(array, channel_first):
    """Return the array with a one-pixel border stripped from every spatial axis.

    The channel axis (first or last, per ``channel_first``) and any axis of
    extent <= 2 are kept whole.
    """
    array = np.array(array)
    channel_dim = 0 if channel_first else array.ndim - 1
    index = tuple(
        slice(extent) if d == channel_dim or extent <= 2 else slice(1, -1)
        for d, extent in enumerate(array.shape)
    )
    return array[index]
def _test_ND(device, dim, batch_size, channel_first, mode, interp, dtype, w_input, h_input, d_input,
             use_size_arg, use_size_input, use_roi):
    """Compare DALI resize output with the PIL reference pipeline.

    Stage 1 verifies that DALI's output *sizes* match the ideal sizes computed
    by ``ref_output_size`` up to rounding. Stage 2 feeds DALI's actual output
    sizes (and the RoIs) to the PIL pipeline and compares the pixel data,
    ignoring a one-pixel border where the implementations legitimately differ.
    """
    dali_pipe, pil_pipe = build_pipes(device, dim, batch_size, channel_first, mode, interp, dtype,
                                      w_input, h_input, d_input, use_size_arg, use_size_input,
                                      use_roi)
    first_spatial_dim = 1 if channel_first else 0
    max_iters = 3
    for iter in range(max_iters):
        o = dali_pipe.run()
        # Helpers that consume the pipeline's outputs sequentially, mirroring
        # the order in which build_pipes appended them.
        output_idx = 0
        def get_outputs(n):
            nonlocal output_idx
            start = output_idx
            output_idx += n
            return o[start:output_idx]
        def get_output():
            return get_outputs(1)[0]
        dali_in, dali_out = get_outputs(2)
        if use_roi:
            roi_start, roi_end = (np.array(x.as_tensor(), dtype=np.float32) for x in get_outputs(2))
        else:
            # No explicit RoI - the effective RoI is the whole input.
            roi_end = np.stack(
                [dali_in[i].shape()[first_spatial_dim:first_spatial_dim + dim] for i in
                 range(batch_size)]).astype(np.float32)
            roi_start = np.zeros([batch_size, dim], dtype=np.float32)
        if use_size_arg:
            size = np.array(get_output().as_tensor(), np.float32)
        else:
            # Separate d/h/w outputs - stack them into per-sample size vectors.
            size = np.stack([x.as_tensor() for x in get_outputs(dim)], axis=1)
        roi_size = roi_end - roi_start
        dali_out_size = np.stack(
            [dali_out[i].shape()[first_spatial_dim:first_spatial_dim + dim] for i in
             range(batch_size)])
        # Stage 1: output sizes must match the ideal ones up to rounding.
        for i in range(batch_size):
            ref_size = ref_output_size(mode, size[i], roi_size[i], max_size(dim))
            real_size = dali_out_size[i]
            max_err = np.max(np.abs(ref_size - real_size))
            eps = 0.6  # allow for rounding errors - we'll use _real_ size when resizing with PIL
            if max_err > eps:
                # Dump the full context before failing to ease debugging.
                print("Invalid output size!")
                print(dali_out[i].shape())
                print("Got: ", real_size)
                print("Expected: ", ref_size)
                print("RoI", roi_size[i])
                print("Input size", dali_in[i].shape())
                print("Requested output", size[i])
            assert max_err <= eps
        # Stage 2: resize with PIL to the sizes DALI actually produced.
        ref_in = dali_in
        if isinstance(ref_in, dali.tensors.TensorListGPU):
            ref_in = ref_in.as_cpu()  # suppress warnings
        pil_pipe.feed_input("images", ref_in, layout=layout_str(dim, channel_first))
        pil_pipe.feed_input("size", dali_out_size)
        pil_pipe.feed_input("roi_start", roi_start)
        pil_pipe.feed_input("roi_end", roi_end)
        ref = pil_pipe.run()
        dali_resized = o[1]
        if isinstance(dali_resized, dali.tensors.TensorListGPU):
            dali_resized = dali_resized.as_cpu()
        ref_resized = ref[0]
        max_avg_err = 0.6 if dim == 3 else 0.4
        max_err = 12 if dim == 3 else 10
        if interp == types.INTERP_LANCZOS3:
            # allow a larger error for Lanczos3
            max_err *= 2
        # Compare interiors only - border pixels may differ between implementations.
        dali_interior = [interior(x, channel_first) for x in dali_resized]
        ref_interior = [interior(x, channel_first) for x in ref_resized]
        check_batch(dali_interior, ref_interior, batch_size, max_avg_err, max_err)
def _tests(dim, device):
    """Yield resize-vs-PIL test cases for the given dimensionality and device.

    Interpolation is limited to triangular and Lanczos3:
    - Cannot test linear against PIL, because PIL uses triangular filter when downscaling
    - Cannot test Nearest Neighbor because rounding errors cause gross discrepancies (pixel shift)
    """
    batch_size = 2 if dim == 3 else 10
    # (interp_idx, dtype, channel_first, use_size_arg, use_size_input,
    #  w_input, h_input, d_input, use_roi)
    configs = [
        (0, types.UINT8, True, False, False, False, False, False, False),
        (1, types.FLOAT, False, False, False, False, True, True, True),
        (0, types.FLOAT, True, False, False, True, True, False, True),
        (1, types.FLOAT, False, False, False, True, False, True, False),
        (0, types.UINT8, True, True, False, False, False, False, True),
        (1, types.UINT8, False, True, True, False, False, False, False),
    ]
    filters = [types.INTERP_TRIANGULAR, types.INTERP_LANCZOS3]
    for mode in ["default", "stretch", "not_smaller", "not_larger"]:
        for (interp_idx, dtype, _channel_first, use_size_arg, use_size_input,
             w_input, h_input, d_input, use_roi) in configs:
            # NOTE: channel_first from the table is not forwarded; False is
            # always passed (matching the original behavior).
            yield (_test_ND, device, dim, batch_size, False, mode, filters[interp_idx], dtype,
                   w_input, h_input, d_input, use_size_arg, use_size_input, use_roi)
def test_2D_gpu():
    """2D resize vs. PIL reference, GPU backend."""
    yield from _tests(2, "gpu")
def test_3D_gpu():
    """3D resize vs. PIL reference, GPU backend."""
    yield from _tests(3, "gpu")
def test_2D_cpu():
    """2D resize vs. PIL reference, CPU backend."""
    yield from _tests(2, "cpu")
def test_3D_cpu():
    """3D resize vs. PIL reference, CPU backend."""
    yield from _tests(3, "cpu")
def _test_stitching(device, dim, channel_first, dtype, interp):
    """Check that resizing halves/quadrants/octants via RoIs and stitching them
    back together matches resizing the whole image at once.

    Each spatial axis is split in half with a relative RoI; every part is
    resized to half the full output size, reassembled with ``np.block`` and
    compared against the single full-size resize.
    """
    batch_size = 1 if dim == 3 else 10
    pipe = dali.pipeline.Pipeline(batch_size=batch_size, num_threads=1, device_id=0, seed=1234,
                                  prefetch_queue_depth=1)
    with pipe:
        if dim == 2:
            files, labels = dali.fn.readers.caffe(path=db_2d_folder, random_shuffle=True)
            images_cpu = dali.fn.decoders.image(files, device="cpu")
        else:
            images_cpu = dali.fn.external_source(source=random_3d_loader(batch_size), layout="DHWC")
        images_hwc = images_cpu if device == "cpu" else images_cpu.gpu()
        if channel_first:
            images = dali.fn.transpose(images_hwc, perm=[3, 0, 1, 2] if dim == 3 else [2, 0, 1],
                                       transpose_layout=True)
        else:
            images = images_hwc
        out_size_full = [32, 32, 32] if dim == 3 else [160, 160]
        out_size_half = [x // 2 for x in out_size_full]
        roi_start = [0] * dim
        roi_end = [1] * dim
        # Reference: resize the whole image in one go.
        resized = fn.resize(images, dtype=dtype, min_filter=interp, mag_filter=interp,
                            size=out_size_full)
        outputs = [resized]
        # Resize each half/quadrant/octant by passing a relative RoI; for 2D
        # the z loop runs once and the depth RoI stays at [0, 1].
        for z in range(dim - 1):
            if dim == 3:
                roi_start[0] = z * 0.5
                roi_end[0] = (z + 1) * 0.5
            for y in [0, 1]:
                roi_start[-2] = y * 0.5
                roi_end[-2] = (y + 1) * 0.5
                for x in [0, 1]:
                    roi_start[-1] = x * 0.5
                    roi_end[-1] = (x + 1) * 0.5
                    part = fn.resize(images, dtype=dtype, interp_type=interp, size=out_size_half,
                                     roi_start=roi_start, roi_end=roi_end, roi_relative=True)
                    outputs.append(part)
        pipe.set_outputs(*outputs)
    pipe.build()
    for _ in range(1):
        out = pipe.run()
        if device == "gpu":
            out = [x.as_cpu() for x in out]
        whole = out[0]
        tiled = []
        for i in range(batch_size):
            slices = []
            # Outputs 1..4 (and 5..8 for 3D) hold the four XY quadrants of each
            # depth slab, in (y, x) order.
            for z in range(dim - 1):
                q00 = out[1 + z * 4 + 0].at(i)
                q01 = out[1 + z * 4 + 1].at(i)
                q10 = out[1 + z * 4 + 2].at(i)
                q11 = out[1 + z * 4 + 3].at(i)
                if channel_first:
                    slices.append(np.block([[q00, q01],
                                            [q10, q11]]))
                else:
                    slices.append(np.block([[[q00], [q01]],
                                            [[q10], [q11]]]))
            if dim == 3:
                # Stack the two depth slabs along the depth axis.
                if channel_first:
                    tiled.append(np.block([[[slices[0]]], [[slices[1]]]]))
                else:
                    tiled.append(np.block([[[[slices[0]]]], [[[slices[1]]]]]))
            else:
                tiled.append(slices[0])
        # Bug fix: the original compared the *builtin* ``type`` against
        # types.FLOAT (always False), so float outputs were checked with the
        # loose integer tolerance of 1 instead of 1e-3.
        max_err = 1e-3 if dtype == types.FLOAT else 1
        check_batch(tiled, whole, batch_size, 1e-4, max_err, compare_layouts=False)
def test_stitching():
    """Generate stitching test cases over device/type/layout/filter combinations."""
    filters = [types.INTERP_LINEAR, types.INTERP_CUBIC, types.INTERP_TRIANGULAR,
               types.INTERP_LANCZOS3]
    dtypes = [types.UINT8, types.FLOAT]
    for device in ["cpu", "gpu"]:
        for dim in [3]:
            for dtype in dtypes:
                for channel_first in [False, True]:
                    for interp in filters:
                        yield _test_stitching, device, dim, channel_first, dtype, interp
def _test_empty_input(dim, device):
    """Check that degenerate (zero-volume) samples pass through fn.resize.

    Every second sample is sliced down to zero extent; resizing such a sample
    must yield an empty output, while the remaining samples must be bit-exact
    with resizing the original, non-degenerate batch.
    """
    batch_size = 8
    pipe = Pipeline(batch_size=batch_size, num_threads=8, device_id=0, seed=1234)
    if dim == 2:
        files, labels = dali.fn.readers.caffe(path=db_2d_folder, random_shuffle=True)
        images_cpu = dali.fn.decoders.image(files, device="cpu")
    else:
        images_cpu = dali.fn.external_source(source=random_3d_loader(batch_size), layout="DHWC")
    images = images_cpu if device == "cpu" else images_cpu.gpu()
    in_rel_shapes = np.ones([batch_size, dim], dtype=np.float32)
    in_rel_shapes[::2, :] *= 0  # all zeros in every second sample
    degenerate_images = fn.slice(images, np.zeros([dim]), fn.external_source(lambda: in_rel_shapes),
                                 axes=list(range(dim)))
    sizes = np.random.randint(20, 50, [batch_size, dim], dtype=np.int32)
    size_inp = fn.external_source(lambda: [x.astype(np.float32) for x in sizes])
    resize_no_empty = fn.resize(images, size=size_inp, mode="not_larger")
    resize_with_empty = fn.resize(degenerate_images, size=size_inp, mode="not_larger")
    pipe.set_outputs(resize_no_empty, resize_with_empty)
    pipe.build()
    for it in range(3):
        out_no_empty, out_with_empty = pipe.run()
        if device == "gpu":
            out_no_empty = out_no_empty.as_cpu()
            out_with_empty = out_with_empty.as_cpu()
        for i in range(batch_size):
            if i % 2 != 0:
                # Odd samples were left intact - results must match exactly.
                assert np.array_equal(out_no_empty.at(i), out_with_empty.at(i))
            else:
                # Even samples were sliced to zero volume - output must be empty.
                assert np.prod(out_with_empty.at(i).shape) == 0
def test_empty_input():
    """Run the empty-input test for both backends and both dimensionalities."""
    for device in ("cpu", "gpu"):
        for dim in (2, 3):
            yield _test_empty_input, dim, device
def _test_very_small_output(dim, device):
    """Check that a microscopic requested size is clamped to 1 pixel per spatial axis."""
    batch_size = 8
    pipe = Pipeline(batch_size=batch_size, num_threads=8, device_id=0, seed=1234)
    if dim == 2:
        files, labels = dali.fn.readers.caffe(path=db_2d_folder, random_shuffle=True)
        images_cpu = dali.fn.decoders.image(files, device="cpu")
    else:
        images_cpu = dali.fn.external_source(source=random_3d_loader(batch_size), layout="DHWC")
    images = images_cpu if device == "cpu" else images_cpu.gpu()
    # The requested size is (almost) zero - DALI should clamp it to 1 per axis.
    resize_tiny = fn.resize(images, size=1e-10)
    pipe.set_outputs(resize_tiny)
    pipe.build()
    for it in range(3):
        out, = pipe.run()
        # Spatial extents collapse to 1; the channel extent is preserved
        # (1 channel for the 3D test volumes, 3 for the decoded 2D images).
        ref_size = [1, 1, 1, 1] if dim == 3 else [1, 1, 3]
        for t in out:
            assert t.shape() == ref_size
def test_very_small_output():
    """Run the tiny-output clamping test for both backends and dimensionalities."""
    for device in ("cpu", "gpu"):
        for dim in (2, 3):
            yield _test_very_small_output, dim, device
def test_checkerboard_dali_vs_onnx_ref():
    """Compare DALI linear/cubic resize (with and without antialiasing) against
    precomputed outputs of the ONNX reference resize implementation on a small
    checkerboard image."""
    improc_data_dir = os.path.join(test_data_root, 'db', 'imgproc')
    ref_dir = os.path.join(improc_data_dir, 'ref', 'resampling')
    # Checker board with shape (22, 22) with 2x2 squares
    checkerboard_file = os.path.join(improc_data_dir, 'checkerboard_22_22.npy')
    checkerboard = np.load(checkerboard_file)
    assert checkerboard.shape == (22, 22)
    out_size = (17, 13)
    out_size_str = '_'.join([str(n) for n in out_size])
    ref_resized_linear_filename = os.path.join(ref_dir, f"checkerboard_linear_{out_size_str}.npy")
    ref_resized_linear_antialias_filename = os.path.join(
        ref_dir, f"checkerboard_linear_antialias_{out_size_str}.npy")
    ref_resized_cubic_filename = os.path.join(ref_dir, f"checkerboard_cubic_{out_size_str}.npy")
    ref_resized_cubic_antialias_filename = os.path.join(
        ref_dir, f"checkerboard_cubic_antialias_{out_size_str}.npy")
    # Reference generated with ONNX reference code. To regenerate uncomment
    # from onnx.backend.test.case.node.resize import interpolate_nd, linear_coeffs, \
    #     linear_coeffs_antialias, cubic_coeffs, cubic_coeffs_antialias
    #
    # ref_resized_linear = interpolate_nd(checkerboard, lambda x, _: linear_coeffs(x),
    #                                     output_size=out_size)
    # np.save(ref_resized_linear_filename, ref_resized_linear)
    # ref_resized_linear_antialias = interpolate_nd(checkerboard, linear_coeffs_antialias,
    #                                               output_size=out_size)
    # np.save(ref_resized_linear_antialias_filename, ref_resized_linear_antialias)
    # ref_resized_cubic = interpolate_nd(checkerboard, lambda x, _: cubic_coeffs(x, A=-0.5),
    #                                    output_size=out_size)
    # np.save(ref_resized_cubic_filename, ref_resized_cubic)
    # ref_resized_cubic_antialias = interpolate_nd(checkerboard,
    #                                              lambda x, scale: cubic_coeffs_antialias(x, scale,
    #                                                                                      A=-0.5),
    #                                              output_size=out_size)
    # np.save(ref_resized_cubic_antialias_filename, ref_resized_cubic_antialias)
    ref_resized_linear = np.load(ref_resized_linear_filename)
    assert ref_resized_linear.shape == out_size
    ref_resized_linear_antialias = np.load(ref_resized_linear_antialias_filename)
    assert ref_resized_linear_antialias.shape == out_size
    ref_resized_cubic = np.load(ref_resized_cubic_filename)
    assert ref_resized_cubic.shape == out_size
    ref_resized_cubic_antialias = np.load(ref_resized_cubic_antialias_filename)
    assert ref_resized_cubic_antialias.shape == out_size
    antialias_ON = True
    antialias_OFF = False
    # Reference lookup: interpolation type -> antialias flag -> expected image.
    ref_data = {
        types.INTERP_LINEAR: {
            antialias_OFF: ref_resized_linear,
            antialias_ON: ref_resized_linear_antialias
        },
        types.INTERP_CUBIC: {
            antialias_OFF: ref_resized_cubic,
            antialias_ON: ref_resized_cubic_antialias
        }
    }

    @pipeline_def(batch_size=1, num_threads=3, device_id=0)
    def pipe(device, interp_type, antialias, test_data=checkerboard, out_size=out_size):
        data = types.Constant(test_data, device=device)
        # Resize needs a channel dimension - add it and squeeze it back afterwards.
        data = fn.expand_dims(data, axes=[2])
        resized = fn.resize(data, dtype=types.FLOAT, min_filter=interp_type, mag_filter=interp_type,
                            size=out_size, antialias=antialias)
        resized = fn.squeeze(resized, axes=[2])
        return resized

    def impl(device, interp_type, antialias):
        assert interp_type in ref_data
        ref = ref_data[interp_type][antialias]
        p = pipe(device, interp_type, antialias)
        p.build()
        out, = p.run()
        out_dali = as_array(out[0])
        abs_diff = np.abs(ref - out_dali)
        max_error = np.max(abs_diff)
        if max_error > 1:
            # Dump reference/actual/diff images to ease debugging of failures.
            suffix_str = 'cubic' if interp_type == types.INTERP_CUBIC else 'linear'
            img1 = PIL.Image.fromarray(np.clip(ref, 0, 255).astype(np.uint8))
            img1.save(f'ref_resized_{suffix_str}.png')
            img2 = PIL.Image.fromarray(np.clip(out_dali, 0, 255).astype(np.uint8))
            img2.save(f'dali_resized_{suffix_str}.png')
            img2 = PIL.Image.fromarray(np.clip(127 + abs_diff, 0, 255).astype(np.uint8))
            img2.save(f'diff_resized_{suffix_str}.png')
        np.testing.assert_allclose(out_dali, ref, atol=1)

    for device in ['cpu', 'gpu']:
        for interp_type in [types.INTERP_LINEAR, types.INTERP_CUBIC]:
            for antialias in [antialias_OFF, antialias_ON]:
                yield impl, device, interp_type, antialias
|
DALI-main
|
dali/test/python/operator_2/test_resize.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import random
from nose_utils import assert_raises
# Fixed seed so the randomized shapes and batch sizes are reproducible across runs.
np.random.seed(4321)
def random_shape(min_sh, max_sh, ndim):
    """Draw a random ndim-dimensional shape with extents in [min_sh, max_sh)."""
    extents = [np.random.randint(min_sh, max_sh) for _ in range(ndim)]
    return np.array(extents, dtype=np.int32)
def batch_gen(max_batch_size, sample_shape_fn, dtype=np.float32):
    """Generate a batch (list) of zero-filled arrays with a random batch size.

    The batch size is drawn from [1, max_batch_size); each sample's shape is
    obtained by a fresh call to ``sample_shape_fn``.
    """
    bs = np.random.randint(1, max_batch_size)
    return [np.zeros(sample_shape_fn(), dtype=dtype) for _ in range(bs)]
def check_roi_random_crop(ndim=2, max_batch_size=16,
                          roi_min_start=0, roi_max_start=100,
                          roi_min_extent=20, roi_max_extent=50,
                          crop_min_extent=20, crop_max_extent=50,
                          in_shape_min=400, in_shape_max=500,
                          niter=3):
    """Run roi_random_crop with all supported argument combinations and verify anchors.

    The operator is instantiated six times in one pipeline: with no shape
    information, with a shape-like input, and with an explicit ``in_shape`` -
    each combined with the RoI given either as (roi_start, roi_shape) or
    (roi_start, roi_end). Constant vs. per-sample (tensor) crop/RoI arguments
    are chosen at random per pipeline build.
    """
    pipe = dali.pipeline.Pipeline(batch_size=max_batch_size, num_threads=4, device_id=0, seed=1234)
    with pipe:
        assert in_shape_min < in_shape_max
        def shape_gen_fn():
            return random_shape(in_shape_min, in_shape_max, ndim)
        def data_gen_f():
            return batch_gen(max_batch_size, shape_gen_fn)
        shape_like_in = dali.fn.external_source(data_gen_f, device='cpu')
        in_shape = dali.fn.shapes(shape_like_in, dtype=types.INT32)
        # Crop shape: either a constant or a per-sample random tensor.
        if random.choice([True, False]):
            crop_shape = [(crop_min_extent + crop_max_extent) // 2] * ndim
        else:
            crop_shape = fn.random.uniform(range=(crop_min_extent, crop_max_extent + 1),
                                           shape=(ndim, ), dtype=types.INT32, device='cpu')
        # RoI: either constants or per-sample random tensors.
        if random.choice([True, False]):
            roi_shape = [(roi_min_extent + roi_max_extent) // 2] * ndim
            roi_start = [(roi_min_start + roi_max_start) // 2] * ndim
            roi_end = [roi_start[d] + roi_shape[d] for d in range(ndim)]
        else:
            roi_shape = fn.random.uniform(range=(roi_min_extent, roi_max_extent + 1),
                                          shape=(ndim,), dtype=types.INT32, device='cpu')
            roi_start = fn.random.uniform(range=(roi_min_start, roi_max_start + 1),
                                          shape=(ndim,), dtype=types.INT32, device='cpu')
            roi_end = roi_start + roi_shape
        # Same RoI/crop, all combinations of shape information and RoI spec.
        outs = [
            fn.roi_random_crop(crop_shape=crop_shape,
                               roi_start=roi_start, roi_shape=roi_shape,
                               device='cpu'),
            fn.roi_random_crop(crop_shape=crop_shape,
                               roi_start=roi_start, roi_end=roi_end,
                               device='cpu'),
            fn.roi_random_crop(shape_like_in, crop_shape=crop_shape,
                               roi_start=roi_start, roi_shape=roi_shape,
                               device='cpu'),
            fn.roi_random_crop(shape_like_in, crop_shape=crop_shape,
                               roi_start=roi_start, roi_end=roi_end,
                               device='cpu'),
            fn.roi_random_crop(in_shape=in_shape, crop_shape=crop_shape,
                               roi_start=roi_start, roi_shape=roi_shape,
                               device='cpu'),
            fn.roi_random_crop(in_shape=in_shape, crop_shape=crop_shape,
                               roi_start=roi_start, roi_end=roi_end,
                               device='cpu'),
        ]
        outputs = [in_shape, roi_start, roi_shape, crop_shape, *outs]
        pipe.set_outputs(*outputs)
    pipe.build()
    for _ in range(niter):
        outputs = pipe.run()
        batch_size = len(outputs[0])
        for s in range(batch_size):
            in_shape = np.array(outputs[0][s]).tolist()
            roi_start = np.array(outputs[1][s]).tolist()
            roi_shape = np.array(outputs[2][s]).tolist()
            crop_shape = np.array(outputs[3][s]).tolist()
            # Reference check: the crop must stay in bounds (when the input
            # shape is known), contain the RoI when the crop is larger, and be
            # contained in the RoI when it is smaller.
            def check_crop_start(crop_start, roi_start, roi_shape, crop_shape, in_shape=None):
                ndim = len(crop_start)
                roi_end = [roi_start[d] + roi_shape[d] for d in range(ndim)]
                crop_end = [crop_start[d] + crop_shape[d] for d in range(ndim)]
                for d in range(ndim):
                    if in_shape is not None:
                        assert crop_start[d] >= 0
                        assert crop_end[d] <= in_shape[d]
                    if crop_shape[d] >= roi_shape[d]:
                        assert crop_start[d] <= roi_start[d]
                        assert crop_end[d] >= roi_end[d]
                    else:
                        assert crop_start[d] >= roi_start[d]
                        assert crop_end[d] <= roi_end[d]
            # Outputs 4-5 had no input shape information; outputs 6-9 did.
            for idx in range(4, 6):
                check_crop_start(
                    np.array(outputs[idx][s]).tolist(), roi_start, roi_shape, crop_shape)
            for idx in range(6, 10):
                check_crop_start(
                    np.array(outputs[idx][s]).tolist(), roi_start, roi_shape, crop_shape, in_shape)
def test_roi_random_crop():
    """Generate roi_random_crop checks for 2D/3D and several RoI/crop configurations."""
    batch_size = 16
    niter = 3
    # (roi_start_min, roi_start_max, roi_extent_min, roi_extent_max,
    #  crop_extent_min, crop_extent_max)
    roi_crop_configs = [
        (20, 50, 10, 20, 30, 40),
        (20, 50, 100, 140, 30, 40),
        (0, 1, 10, 20, 80, 100),
    ]
    for ndim in (2, 3):
        in_shape_min = 250
        in_shape_max = 300
        for cfg in roi_crop_configs:
            roi_start_min, roi_start_max, roi_extent_min, roi_extent_max, \
                crop_extent_min, crop_extent_max = cfg
            yield (check_roi_random_crop, ndim, batch_size, roi_start_min, roi_start_max,
                   roi_extent_min, roi_extent_max, crop_extent_min, crop_extent_max,
                   in_shape_min, in_shape_max, niter)
def check_roi_random_crop_error(shape_like_in=None, in_shape=None, crop_shape=None, roi_start=None,
                                roi_shape=None, roi_end=None, error_msg=""):
    """Build and run a roi_random_crop pipeline that is expected to fail.

    Args:
        shape_like_in: optional data input (only its shape is relevant).
        in_shape, crop_shape, roi_start, roi_shape, roi_end: operator arguments.
        error_msg: regex that the raised RuntimeError's message must match.
    """
    batch_size = 3
    niter = 3
    pipe = dali.pipeline.Pipeline(batch_size=batch_size, num_threads=4, device_id=0, seed=1234)
    with pipe:
        inputs = [] if shape_like_in is None else [shape_like_in]
        out = fn.roi_random_crop(*inputs,
                                 in_shape=in_shape,
                                 crop_shape=crop_shape,
                                 roi_start=roi_start,
                                 roi_shape=roi_shape,
                                 roi_end=roi_end,
                                 device='cpu')
        pipe.set_outputs(out)
    # The error may surface either at build time or at run time, so both are
    # covered by the assert_raises scope.
    with assert_raises(RuntimeError, regex=error_msg):
        pipe.build()
        for _ in range(niter):
            pipe.run()
def test_roi_random_crop_error_incompatible_args():
    """Argument combinations that are mutually exclusive must be rejected."""
    shape = np.array([4, 4])
    crop = np.array([2, 2])
    start = np.array([1, 1])
    roi_sh = np.array([1, 1])
    end = np.array([2, 2])
    cases = [
        # A data input together with an explicit in_shape.
        (np.zeros(shape), shape, crop, start, roi_sh, None,
         "``in_shape`` argument is incompatible with providing an input."),
        # roi_shape together with roi_end.
        (np.zeros(shape), None, crop, start, roi_sh, end,
         "Either ROI end or ROI shape should be defined, but not both"),
    ]
    for case in cases:
        yield (check_roi_random_crop_error,) + case
def test_roi_random_crop_error_wrong_args():
    """Invalid argument values (negative shapes, out-of-bounds RoI/crop) must be rejected."""
    shape = np.array([4, 4])
    crop = np.array([2, 2])
    start = np.array([1, 1])
    roi_sh = np.array([1, 1])
    cases = [
        # Negative shape
        (None, np.array([-4, 4]), crop, start, roi_sh, None,
         "Input shape can't be negative."),
        (None, shape, np.array([1, -1]), start, roi_sh, None,
         "Crop shape can't be negative"),
        # Out of bounds ROI
        (None, shape, crop, np.array([-1, -1]), roi_sh, None,
         "ROI can't be out of bounds."),
        (None, shape, crop, start, np.array([4, 4]), None,
         "ROI can't be out of bounds."),
        (None, shape, crop, start, None, np.array([5, 5]),
         "ROI can't be out of bounds."),
        # Out of bounds crop
        (None, shape, np.array([10, 10]), start, roi_sh, None,
         "Cropping shape can't be bigger than the input shape."),
    ]
    for case in cases:
        yield (check_roi_random_crop_error,) + case
|
DALI-main
|
dali/test/python/operator_2/test_roi_random_crop.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
from nvidia.dali import pipeline_def
import nvidia.dali.ops as ops
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import numpy as np
import random
# Fixed, hand-picked boxes in left-top-right-bottom (xyXY / xyzXYZ) relative
# coordinates, used to build deterministic synthetic batches.
bbox_2d_ltrb_1 = [0.0123, 0.0123, 0.2123, 0.2123]
bbox_2d_ltrb_2 = [0.1123, 0.1123, 0.19123, 0.19123]
bbox_2d_ltrb_3 = [0.3123, 0.3123, 0.5123, 0.5123]
bbox_3d_ltrb_1 = [0.123, 0.6123, 0.6123, 0.7123, 0.7123, 0.7123]
bbox_3d_ltrb_2 = [0.1123, 0.1123, 0.1123, 0.2123, 0.2123, 0.2123]
bbox_3d_ltrb_3 = [0.7123, 0.7123, 0.7123, 0.8123, 0.8123, 0.8123]
# Box sets indexed by dimensionality (2D or 3D).
bboxes_data = {
    2: [bbox_2d_ltrb_1, bbox_2d_ltrb_2, bbox_2d_ltrb_3],
    3: [bbox_3d_ltrb_1, bbox_3d_ltrb_2, bbox_3d_ltrb_3]
}
class BBoxDataIterator():
    """Iterator producing synthetic bounding-box (and optional label) batches.

    Alternates between two fixed batch layouts (even vs. odd iteration index)
    and stops after ``n`` iterations, resetting itself so it can be re-iterated.
    """

    def __init__(self, n, batch_size, ndim=2, produce_labels=False):
        self.batch_size = batch_size
        self.ndim = ndim
        self.produce_labels = produce_labels
        self.num_outputs = 2 if produce_labels else 1
        self.n = n
        self.i = 0

    def __len__(self):
        return self.n

    def __iter__(self):
        # return a copy, so that the iteration number doesn't collide
        return BBoxDataIterator(self.n, self.batch_size, self.ndim, self.produce_labels)

    def __next__(self):
        bboxes = bboxes_data[self.ndim]
        boxes = []
        labels = []
        if self.i % 2 == 0:
            # Even iteration: first sample has all three boxes, second sample
            # two, the rest one each.
            boxes.append(np.array([bboxes[0], bboxes[1], bboxes[2]], dtype=np.float32))
            labels.append(np.array([1, 2, 3], dtype=np.int32))
            if self.batch_size > 1:
                boxes.append(np.array([bboxes[2], bboxes[1]], dtype=np.float32))
                labels.append(np.array([2, 1], dtype=np.int32))
                for _ in range(self.batch_size - 2):
                    boxes.append(np.array([bboxes[2]], dtype=np.float32))
                    labels.append(np.array([3], dtype=np.int32))
        else:
            # Odd iteration: a different mixture of the same boxes.
            boxes.append(np.array([bboxes[2]], dtype=np.float32))
            labels.append(np.array([3], dtype=np.int32))
            if self.batch_size > 1:
                boxes.append(np.array([bboxes[1], bboxes[2], bboxes[0]], dtype=np.float32))
                labels.append(np.array([2, 3, 1], dtype=np.int32))
                for _ in range(self.batch_size - 2):
                    boxes.append(np.array([bboxes[1]], dtype=np.float32))
                    labels.append(np.array([2], dtype=np.int32))
        if self.i >= self.n:
            # Reset so the iterator can be used again after exhaustion.
            self.i = 0
            raise StopIteration
        self.i += 1
        return [boxes, labels] if self.produce_labels else [boxes]

    next = __next__
class RandomBBoxCropSynthDataPipeline(Pipeline):
    """Pipeline feeding synthetic boxes (and optional labels) into RandomBBoxCrop.

    define_graph returns the original input boxes followed by everything the
    RandomBBoxCrop operator produces, so tests can verify the crop against the
    unmodified inputs.
    """
    def __init__(self, device, batch_size,
                 bbox_source,
                 # NOTE(review): mutable default arguments below; they appear to
                 # be passed through unmodified, but list defaults are fragile.
                 thresholds=[0, 0.01, 0.05, 0.1, 0.15],
                 threshold_type='iou',
                 scaling=[0.3, 1.0],
                 aspect_ratio=[0.5, 2.0],
                 bbox_layout="xyXY",
                 num_attempts=100,
                 allow_no_crop=False,
                 input_shape=None,
                 crop_shape=None,
                 all_boxes_above_threshold=False,
                 output_bbox_indices=False,
                 num_threads=1, device_id=0, num_gpus=1):
        super(RandomBBoxCropSynthDataPipeline, self).__init__(
            batch_size, num_threads, device_id, seed=1234)
        self.device = device
        # bbox_source: an iterable (e.g. BBoxDataIterator) yielding [boxes] or
        # [boxes, labels] batches; its num_outputs drives external_source below.
        self.bbox_source = bbox_source
        self.bbox_crop = ops.RandomBBoxCrop(
            device=self.device,
            aspect_ratio=aspect_ratio,
            scaling=scaling,
            thresholds=thresholds,
            threshold_type=threshold_type,
            bbox_layout=bbox_layout,
            num_attempts=num_attempts,
            allow_no_crop=allow_no_crop,
            input_shape=input_shape,
            crop_shape=crop_shape,
            all_boxes_above_threshold=all_boxes_above_threshold,
            output_bbox_indices=output_bbox_indices
        )

    def define_graph(self):
        inputs = fn.external_source(source=self.bbox_source,
                                    num_outputs=self.bbox_source.num_outputs)
        outputs = self.bbox_crop(*inputs)
        return [inputs[0], *outputs]
def crop_contains(crop_anchor, crop_shape, point):
    """Check whether ``point`` lies inside the crop window (bounds inclusive).

    Args:
        crop_anchor: start corner of the crop, one value per dimension.
        crop_shape: crop extents, one value per dimension.
        point: coordinates to test, same dimensionality.

    Returns:
        True if the point is inside (or on the boundary of) the crop window.
    """
    ndim = len(crop_shape)
    # Bug fix: the original asserted len(crop_shape) against itself (a
    # tautology); the intent was to validate the anchor's dimensionality.
    assert len(crop_anchor) == ndim
    assert len(point) == ndim
    point = np.array(point)
    crop_anchor = np.array(crop_anchor)
    crop_shape = np.array(crop_shape)
    if np.any(np.less(point, crop_anchor)) or np.any(np.greater(point, (crop_anchor + crop_shape))):
        return False
    return True
def filter_by_centroid(crop_anchor, crop_shape, bboxes):
    """Keep only the boxes whose centroid lies inside the crop window.

    ``bboxes`` is an (nboxes, 2*ndim) array in ltrb order; the result preserves
    the original row order.
    """
    ndim = len(crop_shape)
    selected = [
        i for i, bbox in enumerate(bboxes)
        if crop_contains(crop_anchor, crop_shape,
                         [0.5 * (bbox[d] + bbox[ndim + d]) for d in range(ndim)])
    ]
    return np.array(bboxes[selected, :])
def map_box(bbox, crop_anchor, crop_shape):
    """Map a relative ltrb box into the coordinate frame of the crop window.

    Coordinates are clipped to the crop and normalized to [0, 1] relative to
    the crop extents.
    """
    ndim = len(bbox) // 2
    assert len(crop_anchor) == ndim
    assert len(crop_shape) == ndim
    mapped = np.array(bbox)
    for d in range(ndim):
        c_start = crop_anchor[d]
        c_end = c_start + crop_shape[d]
        extent = c_end - c_start
        lo = (max(c_start, bbox[d]) - c_start) / extent
        hi = (min(c_end, bbox[ndim + d]) - c_start) / extent
        mapped[d] = min(1.0, max(0.0, lo))
        mapped[ndim + d] = min(1.0, max(0.0, hi))
    return mapped
def check_processed_bboxes(crop_anchor, crop_shape, original_boxes, processed_boxes,
                           bbox_indices=None):
    """Verify processed boxes against the reference mapping of the kept input boxes.

    If ``bbox_indices`` is given, it selects the boxes the operator reported as
    kept; otherwise the reference centroid filter is applied.
    """
    if bbox_indices is None:
        kept = filter_by_centroid(crop_anchor, crop_shape, original_boxes)
    else:
        kept = np.array(original_boxes[bbox_indices])
    assert len(original_boxes) >= len(kept)
    assert len(kept) == len(processed_boxes)
    for ref_box, out_box in zip(kept, processed_boxes):
        expected = map_box(ref_box, crop_anchor, crop_shape)
        assert np.allclose(expected, out_box, atol=1e-6)
def check_crop_dims_variable_size(anchor, shape, scaling, aspect_ratio):
    """Check a variable-size crop against its scaling and aspect-ratio ranges.

    ``aspect_ratio`` is a flat list of (min, max) pairs, consumed cyclically,
    one pair per dimension pair (d, d2).
    """
    ndim = len(shape)
    nranges = len(aspect_ratio) / 2
    # The largest crop extent must obey the requested scaling range.
    max_extent = 0.0
    for extent in shape:
        max_extent = max(max_extent, extent)
    assert max_extent >= scaling[0] or np.isclose(max_extent, scaling[0])
    assert max_extent <= scaling[1] or np.isclose(max_extent, scaling[1])
    k = 0
    for d in range(ndim):
        # Anchor and anchor + shape must stay within the [0, 1] input range.
        assert 0.0 <= anchor[d] <= 1.0, anchor
        assert 0.0 < anchor[d] + shape[d] <= 1.0
        for d2 in range(d + 1, ndim):
            ar = shape[d] / shape[d2]
            ar_min = aspect_ratio[k * 2]
            ar_max = aspect_ratio[k * 2 + 1]
            if ar_min == ar_max:
                assert np.isclose(
                    ar, ar_min), "ar {}/{} = {} is not close to ar_min={}".format(d, d2, ar, ar_min)
            else:
                assert ar_min <= ar <= ar_max
            # Advance to the next aspect-ratio range, wrapping around.
            k = int((k + 1) % nranges)
def check_crop_dims_fixed_size(anchor, shape, expected_crop_shape, input_shape):
    """Check a fixed-size crop: exact shape and anchor within valid bounds."""
    for d in range(len(shape)):
        # When the crop is larger than the input, the valid anchor range
        # flips sign, hence the sorted() to normalize the interval.
        anchor_rng = sorted((0.0, input_shape[d] - expected_crop_shape[d]))
        assert anchor_rng[0] <= anchor[d] <= anchor_rng[1], \
            f"Expected anchor[{d}] to be within the range {anchor_rng}. Got: {anchor[d]}"
        assert shape[d] == expected_crop_shape[d], "{} != {}".format(shape, expected_crop_shape)
def check_random_bbox_crop_variable_shape(batch_size, ndim, scaling, aspect_ratio, use_labels,
                                          output_bbox_indices):
    """Run the synthetic-data pipeline and validate every variable-size crop."""
    bbox_source = BBoxDataIterator(100, batch_size, ndim, produce_labels=use_labels)
    layout = "xyzXYZ" if ndim == 3 else "xyXY"
    pipe = RandomBBoxCropSynthDataPipeline(device='cpu', batch_size=batch_size,
                                           bbox_source=bbox_source,
                                           bbox_layout=layout,
                                           scaling=scaling, aspect_ratio=aspect_ratio,
                                           input_shape=None, crop_shape=None,
                                           output_bbox_indices=output_bbox_indices)
    pipe.build()
    # When labels are produced, the bbox-indices output is shifted by one.
    indices_out_idx = 5 if use_labels else 4
    for _ in range(100):
        outputs = pipe.run()
        for s in range(batch_size):
            in_boxes = outputs[0].at(s)
            anchor = outputs[1].at(s)
            crop_shape = outputs[2].at(s)
            out_boxes = outputs[3].at(s)
            check_crop_dims_variable_size(anchor, crop_shape, scaling, aspect_ratio)
            indices = outputs[indices_out_idx].at(s) if output_bbox_indices else None
            check_processed_bboxes(anchor, crop_shape, in_boxes, out_boxes, indices)
def test_random_bbox_crop_variable_shape():
    """Sweep scalings and aspect-ratio ranges for the variable-shape mode."""
    random.seed(1234)
    # Per-dimensionality aspect-ratio range sets (flattened min/max pairs).
    aspect_ratio_ranges = {
        2: [[0.01, 100], [0.5, 2.0], [1.0, 1.0]],
        3: [[0.5, 2.0, 0.6, 2.1, 0.4, 1.9], [1.0, 1.0], [0.5, 0.5, 0.25, 0.25, 0.5, 0.5]]
    }
    for batch_size in [3]:
        for ndim in [2, 3]:
            for scaling in [[0.3, 0.5], [0.1, 0.3], [0.9, 0.99]]:
                for aspect_ratio in aspect_ratio_ranges[ndim]:
                    use_labels = random.choice([True, False])
                    out_bbox_indices = random.choice([True, False])
                    yield (check_random_bbox_crop_variable_shape, batch_size, ndim,
                           scaling, aspect_ratio, use_labels, out_bbox_indices)
def check_random_bbox_crop_fixed_shape(batch_size, ndim, crop_shape, input_shape, use_labels):
    """Run the synthetic pipeline with a fixed crop shape and validate outputs."""
    bbox_source = BBoxDataIterator(100, batch_size, ndim, produce_labels=use_labels)
    layout = "xyzXYZ" if ndim == 3 else "xyXY"
    pipe = RandomBBoxCropSynthDataPipeline(device='cpu', batch_size=batch_size,
                                           bbox_source=bbox_source,
                                           bbox_layout=layout,
                                           scaling=None, aspect_ratio=None,
                                           input_shape=input_shape, crop_shape=crop_shape,
                                           all_boxes_above_threshold=False)
    pipe.build()
    for _ in range(100):
        outputs = pipe.run()
        for s in range(batch_size):
            in_boxes = outputs[0].at(s)
            anchor = outputs[1].at(s)
            shape = outputs[2].at(s)
            out_boxes = outputs[3].at(s)
            check_crop_dims_fixed_size(anchor, shape, crop_shape, input_shape)
            # Box outputs are relative, so normalize the crop window as well.
            rel_anchor = [anchor[d] / input_shape[d] for d in range(ndim)]
            rel_shape = [shape[d] / input_shape[d] for d in range(ndim)]
            check_processed_bboxes(rel_anchor, rel_shape, in_boxes, out_boxes)
def test_random_bbox_crop_fixed_shape():
    """Sweep input/crop shape combinations for the fixed-shape crop mode."""
    input_shapes = {2: [[400, 300]], 3: [[400, 300, 64]]}
    # Includes crops both smaller and larger than the input.
    crop_shapes = {
        2: [[100, 50], [400, 300], [600, 400]],
        3: [[100, 50, 32], [400, 300, 64], [600, 400, 48]]
    }
    for batch_size in [3]:
        for ndim in [2, 3]:
            for input_shape in input_shapes[ndim]:
                for crop_shape in crop_shapes[ndim]:
                    for use_labels in [True, False]:
                        yield (check_random_bbox_crop_fixed_shape, batch_size, ndim,
                               crop_shape, input_shape, use_labels)
def check_random_bbox_crop_overlap(batch_size, ndim, crop_shape, input_shape, use_labels):
    """With threshold_type='overlap' and threshold 1.0, every produced crop
    must fully contain at least one of the input boxes."""
    bbox_source = BBoxDataIterator(100, batch_size, ndim, produce_labels=use_labels)
    layout = "xyzXYZ" if ndim == 3 else "xyXY"
    pipe = RandomBBoxCropSynthDataPipeline(device='cpu', batch_size=batch_size,
                                           thresholds=[1.0],
                                           threshold_type='overlap',
                                           num_attempts=1000,
                                           bbox_source=bbox_source,
                                           bbox_layout=layout,
                                           scaling=None, aspect_ratio=None,
                                           input_shape=input_shape, crop_shape=crop_shape,
                                           all_boxes_above_threshold=False)
    pipe.build()
    for _ in range(100):
        outputs = pipe.run()
        for s in range(batch_size):
            anchor = outputs[1].at(s)
            shape = outputs[2].at(s)
            # Normalize the crop window to relative coordinates, since the
            # boxes are relative.
            rel_anchor = [anchor[d] / input_shape[d] for d in range(ndim)]
            rel_shape = [shape[d] / input_shape[d] for d in range(ndim)]
            in_boxes = outputs[0].at(s)

            def fully_contained(box):
                # Box layout: [start_0..start_{ndim-1}, end_0..end_{ndim-1}].
                return all(rel_anchor[d] <= box[d] and
                           (rel_anchor[d] + rel_shape[d]) >= box[ndim + d]
                           for d in range(ndim))

            assert any(fully_contained(in_boxes[i]) for i in range(in_boxes.shape[0]))
def test_random_bbox_crop_overlap():
    """Sweep shape combinations for the full-overlap threshold mode."""
    input_shapes = {2: [[400, 300]], 3: [[400, 300, 64]]}
    crop_shapes = {
        2: [[150, 150], [400, 300]],
        3: [[50, 50, 32], [400, 300, 64]]
    }
    for batch_size in [3]:
        for ndim in [2, 3]:
            for input_shape in input_shapes[ndim]:
                for crop_shape in crop_shapes[ndim]:
                    for use_labels in [True, False]:
                        yield (check_random_bbox_crop_overlap, batch_size, ndim,
                               crop_shape, input_shape, use_labels)
def test_random_bbox_crop_no_labels():
    """random_bbox_crop must work when only boxes (no labels) are provided."""
    batch_size = 3
    box_shape = [200, 4]
    pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0)

    def make_boxes():
        # Random boxes with coordinates normalized into [0, 1).
        return [(np.random.randint(0, 255, size=box_shape, dtype=np.uint8) / 255)
                .astype(dtype=np.float32) for _ in range(batch_size)]

    boxes = fn.external_source(source=make_boxes)
    cropped = fn.random_bbox_crop(boxes,
                                  aspect_ratio=[0.5, 2.0],
                                  thresholds=[0.1, 0.3, 0.5],
                                  scaling=[0.8, 1.0],
                                  bbox_layout="xyXY")
    pipe.set_outputs(*cropped)
    pipe.build()
    for _ in range(3):
        pipe.run()
def _testimpl_random_bbox_crop_square(use_input_shape):
    """Check crops produced with aspect_ratio=(1, 1).

    NOTE(review): when input_shape is passed, the check asserts that the
    per-axis products in_shape[d] * crop_shape[d] match across the two axes;
    otherwise the two crop extents themselves must match.
    """
    batch_size = 3
    bbox_source = BBoxDataIterator(100, batch_size, 2, produce_labels=False)

    @pipeline_def(num_threads=1, batch_size=batch_size, device_id=0, seed=1234)
    def random_bbox_crop_fixed_aspect_ratio():
        in_sh = fn.random.uniform(range=(400, 600), shape=(2,), dtype=types.INT32)
        inputs = fn.external_source(source=bbox_source, num_outputs=bbox_source.num_outputs)
        outputs = fn.random_bbox_crop(
            *inputs,
            device='cpu',
            aspect_ratio=(1.0, 1.0),
            scaling=(0.5, 0.8),
            thresholds=[0.0],
            threshold_type='iou',
            bbox_layout="xyXY",
            total_num_attempts=100,
            allow_no_crop=False,
            input_shape=in_sh if use_input_shape else None,
        )
        return in_sh, outputs[1]

    pipe = random_bbox_crop_fixed_aspect_ratio()
    pipe.build()
    for _ in range(3):
        outputs = pipe.run()
        for s in range(batch_size):
            in_shape = outputs[0].at(s)
            crop_shape = outputs[1].at(s)
            if use_input_shape:
                np.testing.assert_allclose(
                    in_shape[0] * crop_shape[0], in_shape[1] * crop_shape[1], rtol=1e-06)
            else:
                np.testing.assert_allclose(crop_shape[0], crop_shape[1], rtol=1e-06)
def test_random_bbox_crop_square():
    """Exercise the square-crop check with and without an explicit input shape."""
    for flag in (False, True):
        yield _testimpl_random_bbox_crop_square, flag
|
DALI-main
|
dali/test/python/operator_2/test_random_bbox_crop.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
from nvidia.dali import fn, pipeline_def
import math
from test_utils import compare_pipelines, as_array, RandomDataIterator, RandomlyShapedDataIterator
import itertools
from nose2.tools import params
import numpy as np
def transpose_func(image, permutation=(1, 0, 2)):
    """Reference implementation: permute the axes of ``image`` with numpy."""
    return np.transpose(image, permutation)
class TransposePipeline(Pipeline):
    """Pipeline applying the Transpose operator to externally fed data."""

    def __init__(self, device, batch_size, layout, iterator, num_threads=1, device_id=0,
                 permutation=(1, 0, 2), transpose_layout=False, out_layout_arg=None):
        super(TransposePipeline, self).__init__(batch_size, num_threads, device_id)
        self.device = device
        self.layout = layout
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        # Only forward output_layout when explicitly requested.
        extra_args = {}
        if out_layout_arg:
            extra_args['output_layout'] = out_layout_arg
        self.transpose = ops.Transpose(device=self.device,
                                       perm=permutation,
                                       transpose_layout=transpose_layout,
                                       **extra_args)

    def define_graph(self):
        self.data = self.inputs()
        data = self.data.gpu() if self.device == 'gpu' else self.data
        return self.transpose(data)

    def iter_setup(self):
        self.feed_input(self.data, self.iterator.next(), layout=self.layout)
class PythonOpPipeline(Pipeline):
    """Reference pipeline applying a python function to externally fed data."""

    def __init__(self, function, batch_size, layout, iterator, num_threads=1, device_id=0):
        # PythonFunction requires synchronous, non-pipelined execution.
        super(PythonOpPipeline, self).__init__(batch_size, num_threads, device_id,
                                               exec_async=False, exec_pipelined=False)
        self.layout = layout
        self.iterator = iterator
        self.inputs = ops.ExternalSource()
        self.oper = ops.PythonFunction(function=function)

    def define_graph(self):
        self.data = self.inputs()
        return self.oper(self.data)

    def iter_setup(self):
        self.feed_input(self.data, self.iterator.next(), layout=self.layout)
def check_transpose_vs_numpy(device, batch_size, dim, total_volume, permutation):
    """Compare DALI Transpose against a numpy reference on random data."""
    # Choose a per-dimension extent so a full batch stays near total_volume
    # elements.
    extent = int(math.pow(total_volume / batch_size, 1 / dim))
    max_shape = [extent] * dim
    print("Testing", device, "backend with batch of", batch_size, "max size", max_shape)
    print("permutation ", permutation)
    dali_data = RandomlyShapedDataIterator(batch_size, max_shape=max_shape)
    ref_data = RandomlyShapedDataIterator(batch_size, max_shape=max_shape)
    dali_pipe = TransposePipeline(device, batch_size, "", iter(dali_data),
                                  permutation=permutation)
    ref_pipe = PythonOpPipeline(lambda x: transpose_func(x, permutation), batch_size, "",
                                iter(ref_data))
    compare_pipelines(dali_pipe, ref_pipe, batch_size=batch_size, N_iterations=3)
def all_permutations(n):
    """Return an iterator over every permutation of the axis indices 0..n-1."""
    axes = range(n)
    return itertools.permutations(axes)
def test_transpose_vs_numpy():
    """Cross-check DALI Transpose against numpy for many axis permutations."""
    for device in ['cpu', 'gpu']:
        for batch_size in [1, 3, 10, 100]:
            for dim in range(2, 5):
                for permutation in all_permutations(dim):
                    yield (check_transpose_vs_numpy, device, batch_size, dim,
                           1000000, permutation)
def check_transpose_layout(device, batch_size, shape, in_layout, permutation,
                           transpose_layout, out_layout_arg):
    """Check the layout string propagated by the Transpose operator.

    Expected layout precedence: an explicit `output_layout` argument wins;
    otherwise, with transpose_layout=True, the input layout is permuted;
    otherwise the input layout is passed through unchanged ("" for None).
    """
    eii = RandomDataIterator(batch_size, shape=shape)
    pipe = TransposePipeline(device, batch_size, in_layout, iter(eii),
                             permutation=permutation,
                             transpose_layout=transpose_layout,
                             out_layout_arg=out_layout_arg)
    pipe.build()
    out = pipe.run()
    # Fixed: removed a dead `expected_out_layout = in_layout` assignment that
    # was unconditionally overwritten by the branches below.
    if out_layout_arg:
        expected_out_layout = out_layout_arg
    elif transpose_layout:
        expected_out_layout = "".join([list(in_layout)[d] for d in permutation])
    else:
        expected_out_layout = "" if in_layout is None else in_layout
    assert out[0].layout() == expected_out_layout
def test_transpose_layout():
    """Exercise layout propagation across devices, batch sizes and arguments.

    Fixed: removed a dead `batch_size = 3` assignment that was immediately
    shadowed by the `for batch_size in (1, 3)` loop.
    """
    for device in {'cpu', 'gpu'}:
        for batch_size in (1, 3):
            for shape in [(600, 400, 3), (600, 400, 1)]:
                for permutation, in_layout, transpose_layout, out_layout_arg in \
                        [((2, 0, 1), "HWC", True, None),
                         ((2, 0, 1), "HWC", True, "CHW"),
                         ((2, 0, 1), "HWC", False, "CHW"),
                         ((1, 0, 2), None, False, None),
                         ((1, 0, 2), "XYZ", True, None),
                         ((1, 0, 2), None, None, "ABC")]:
                    yield check_transpose_layout, device, batch_size, shape, \
                        in_layout, permutation, transpose_layout, out_layout_arg
@params(*itertools.product(('cpu', 'gpu'), ((10, 20, 3), (10, 20), (1,), (), (3, 3, 2, 2, 3))))
def test_transpose_default(device, shape):
    """fn.transpose with no `perm` argument must default to reversing all axes."""
    @pipeline_def(batch_size=1, num_threads=3, device_id=0)
    def pipe():
        data = fn.random.uniform(range=[0, 255], shape=shape, device=device)
        # Fixed: `len(shape) or 0` was redundant — len(()) is already 0.
        ndim = len(shape)
        # Explicit full axis reversal, e.g. ndim=3 -> [2, 1, 0].
        perm = [d - 1 for d in range(ndim, 0, -1)]
        return fn.transpose(data), fn.transpose(data, perm=perm)
    p = pipe()
    p.build()
    out_default, out_explicit = [as_array(o[0]) for o in p.run()]
    np.testing.assert_array_equal(out_explicit, out_default)
|
DALI-main
|
dali/test/python/operator_2/test_transpose.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.